1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * kernel entry points (interruptions, system call wrappers)
5 *  Copyright (C) 1999,2000 Philipp Rumpf
6 *  Copyright (C) 1999 SuSE GmbH Nuernberg
7 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 *
10 *    This program is free software; you can redistribute it and/or modify
11 *    it under the terms of the GNU General Public License as published by
12 *    the Free Software Foundation; either version 2, or (at your option)
13 *    any later version.
14 *
15 *    This program is distributed in the hope that it will be useful,
16 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
17 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18 *    GNU General Public License for more details.
19 *
20 *    You should have received a copy of the GNU General Public License
21 *    along with this program; if not, write to the Free Software
22 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <asm/asm-offsets.h>
26
27/* we have the following possibilities to act on an interruption:
28 *  - handle in assembly and use shadowed registers only
29 *  - save registers to kernel stack and handle in assembly or C */
30
31
32#include <asm/psw.h>
33#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
34#include <asm/assembly.h>	/* for LDREG/STREG defines */
35#include <asm/pgtable.h>
36#include <asm/signal.h>
37#include <asm/unistd.h>
38#include <asm/thread_info.h>
39
40#include <linux/linkage.h>
41
#ifdef CONFIG_64BIT
/* 64-bit build: compare-and-branch instructions need the doubleword
 * ("*") condition completer, and we assemble for PA 2.0 wide mode. */
#define CMPIB           cmpib,*
#define CMPB            cmpb,*
#define COND(x)		*x

	.level 2.0w
#else
/* 32-bit build: plain word-sized condition completers, PA 2.0 narrow mode. */
#define CMPIB           cmpib,
#define CMPB            cmpb,
#define COND(x)		x

	.level 2.0
#endif

	/* NOTE(review): pa_dbit_lock is not referenced in this chunk;
	 * presumably used by the dirty-bit trap paths — confirm against
	 * the rest of the file. */
	.import         pa_dbit_lock,data

	/* space_to_prot macro creates a prot id from a space id */

#if SPACEID_SHIFT == 0
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
69
	/* Switch to virtual mapping, trashing only %r1.
	 *
	 * Works by loading the interruption instruction-address queue
	 * (IIASQ/IIAOQ via %cr17/%cr18) with the local label 4, putting
	 * KERNEL_PSW into %ipsw, and executing rfir: the "return from
	 * interruption" lands on label 4 below with translations on.
	 */
	.macro  virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mfsp	%sr7, %r1
	or,=    %r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
	mtsp	%r1, %sr3
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm
96
97	/*
98	 * The "get_stack" macros are responsible for determining the
99	 * kernel stack value.
100	 *
101	 * For Faults:
102	 *      If sr7 == 0
103	 *          Already using a kernel stack, so call the
104	 *          get_stack_use_r30 macro to push a pt_regs structure
105	 *          on the stack, and store registers there.
106	 *      else
107	 *          Need to set up a kernel stack, so call the
108	 *          get_stack_use_cr30 macro to set up a pointer
109	 *          to the pt_regs structure contained within the
110	 *          task pointer pointed to by cr30. Set the stack
111	 *          pointer to point to the end of the task structure.
112	 *
113	 * For Interrupts:
114	 *      If sr7 == 0
115	 *          Already using a kernel stack, check to see if r30
116	 *          is already pointing to the per processor interrupt
117	 *          stack. If it is, call the get_stack_use_r30 macro
118	 *          to push a pt_regs structure on the stack, and store
119	 *          registers there. Otherwise, call get_stack_use_cr31
120	 *          to get a pointer to the base of the interrupt stack
121	 *          and push a pt_regs structure on that stack.
122	 *      else
123	 *          Need to set up a kernel stack, so call the
124	 *          get_stack_use_cr30 macro to set up a pointer
125	 *          to the pt_regs structure contained within the
126	 *          task pointer pointed to by cr30. Set the stack
127	 *          pointer to point to the end of the task structure.
128	 *          N.B: We don't use the interrupt stack for the
129	 *          first interrupt from userland, because signals/
130	 *          resched's are processed when returning to userland,
131	 *          and we can sleep in those cases.
132	 *
133	 * Note that we use shadowed registers for temps until
134	 * we can save %r26 and %r29. %r26 is used to preserve
135	 * %r8 (a shadowed register) which temporarily contained
136	 * either the fault type ("code") or the eirr. We need
137	 * to use a non-shadowed register to carry the value over
138	 * the rfir in virt_map. We use %r26 since this value winds
139	 * up being passed as the argument to either do_cpu_irq_mask
140	 * or handle_interruption. %r29 is used to hold a pointer
141	 * the register save area, and once again, it needs to
142	 * be a non-shadowed register so that it survives the rfir.
143	 *
	 * N.B. THREAD_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
145	 */
146
	/* Interruption taken while on a user stack: locate the pt_regs
	 * area inside the task struct (via the thread_info pointer in
	 * %cr30), save %r30/%r29/%r26 there, leave a physical pointer to
	 * pt_regs in %r29 and point %r30 at the kernel stack. */
	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	mfctl   %cr30, %r1
	tophys  %r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys  %r1,%r9
	ldo     TASK_REGS(%r9),%r9
	STREG   %r30, PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	copy    %r9,%r29
	mfctl   %cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	.endm

	/* Interruption taken while already on a kernel stack: push a
	 * pt_regs frame on the current stack, save %r30/%r29/%r26 there,
	 * and leave a physical pointer to it in %r29. */
	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9
	STREG   %r30,PT_GR30(%r9)
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	copy    %r9,%r29
	.endm

	/* Undo the stack setup above: restore %r1, %r30 and (last, since
	 * it is the pointer being used) %r29 from the pt_regs area. */
	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm
181
	/* default interruption handler
	 * (calls traps.c:handle_interruption)
	 * The ldi executes in the branch delay slot, so intr_save sees the
	 * trap number in shadowed %r8.  Each vector slot is 32 bytes. */
	.macro	def code
	b	intr_save
	ldi     \code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask)
	 * Delay slot captures %sr7 in shadowed %r16 so intr_extint can
	 * tell user mode (sr7 != 0) from kernel mode (sr7 == 0). */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC (High Priority Machine Check) handler.  The slot is mostly
	 * a descriptor (checksum/address/length) consumed by firmware;
	 * the words marked "patched" are filled in at boot. */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	PA(os_hpmc)	/* address of handler */
	.word	0		/* length of handler */
	.endm
210
211	/*
212	 * Performance Note: Instructions will be moved up into
213	 * this part of the code later on, once we are sure
214	 * that the tlb miss handlers are close to final form.
215	 */
216
	/* Register definitions for tlb miss handler macros.
	 * Both are shadowed registers, so they survive until the miss
	 * handlers below copy them out. */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 * The faulting address comes from the front of the instruction
	 * address queue (%pcsq/%pcoq); the mfctl for va sits in the
	 * branch delay slot.
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va	/* delay slot */

	.align		32
	.endm
253
#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR (not in the PC queue), hence the
	 * different mfctl sources below.
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	itlb_miss_11
	mfctl 	%ior,va		/* delay slot */

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR.
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl 	%ior,va		/* delay slot */

	.align		32
	.endm
297
#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 * Faulting space/offset come from %isr/%ior; the mfctl for va
	 * executes in the branch delay slot.
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b       dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va	/* delay slot */

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va		/* delay slot */

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va		/* delay slot */

	.align		32
	.endm
357
#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 * Taken on the first write to a clean page; space/offset come
	 * from %isr/%ior as for data TLB misses.
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va		/* delay slot */

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va		/* delay slot */

	.align		32
	.endm
389
390	/* The following are simple 32 vs 64 bit instruction
391	 * abstractions for the macros */
392	.macro		EXTR	reg1,start,length,reg2
393#ifdef CONFIG_64BIT
394	extrd,u		\reg1,32+\start,\length,\reg2
395#else
396	extrw,u		\reg1,\start,\length,\reg2
397#endif
398	.endm
399
400	.macro		DEP	reg1,start,length,reg2
401#ifdef CONFIG_64BIT
402	depd		\reg1,32+\start,\length,\reg2
403#else
404	depw		\reg1,\start,\length,\reg2
405#endif
406	.endm
407
408	.macro		DEPI	val,start,length,reg
409#ifdef CONFIG_64BIT
410	depdi		\val,32+\start,\length,\reg
411#else
412	depwi		\val,\start,\length,\reg
413#endif
414	.endm
415
416	/* In LP64, the space contains part of the upper 32 bits of the
417	 * fault.  We have to extract this and place it in the va,
418	 * zeroing the corresponding bits in the space register */
419	.macro		space_adjust	spc,va,tmp
420#ifdef CONFIG_64BIT
421	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
422	depd		%r0,63,SPACEID_SHIFT,\spc
423	depd		\tmp,31,SPACEID_SHIFT,\va
424#endif
425	.endm
426
	/* NOTE(review): swapper_pg_dir is a page directory, yet is
	 * imported as "code" — looks odd but is the long-standing form
	 * here; confirm before changing. */
	.import		swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25.
	 * The "or,COND(=)" nullifies the mfctl when \spc == 0, so the
	 * swapper_pg_dir address loaded above is kept for kernel space. */
	.macro		get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0
	mfctl		%cr25,\reg
	.endm

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro		space_check	spc,tmp,fault
	mfsp		%sr7,\tmp
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy		\spc,\tmp	/* nullified unless \spc == 0 */
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm
459
	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 *
	 * On success \pmd holds the PTE value; on a missing pmd or pte
	 * the nullified bb branches to \fault.  Clobbers %r9 (t1). */
	.macro		L2_ptep	pmd,pte,index,va,fault
#if PT_NLEVELS == 3
	EXTR		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
	EXTR		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
	DEP             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	copy		%r0,\pte
	ldw,s		\index(\pmd),\pmd
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	copy		\pmd,%r9
	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
	EXTR		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	DEP		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
	LDREG		%r0(\pmd),\pte		/* pmd is now pte */
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
	.endm
485
	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory).
	 *
	 * The repeated "extrd,u,*=" tests nullify the following
	 * instruction whenever the pgd-index bits of \va are zero,
	 * i.e. the whole pgd walk is skipped for the first-4GB case
	 * and only the final ldo (pgd -> adjacent pmd) runs instead
	 * (its guard uses *<>, the inverted condition). */
	.macro		L3_ptep pgd,pte,index,va,fault
#if PT_NLEVELS == 3     /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy		%r0,\pte
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s		\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld		\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy		\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm
513
	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set:
	 * the "and,COND(<>)" nullifies the STREG when the bit is
	 * already present in \pte. */
	.macro		update_ptep	ptep,pte,tmp,tmp1
	ldi		_PAGE_ACCESSED,\tmp1
	or		\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG		\tmp,0(\ptep)
	.endm

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro		update_dirty	ptep,pte,tmp
	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or		\tmp,\pte,\pte
	STREG		\pte,0(\ptep)
	.endm
530
	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below.
	 * Each "extrd,u,*=" below nullifies the following instruction
	 * when the tested pte bit is clear. */
	.macro		make_insert_tlb	spc,pte,prot
	space_to_prot   \spc \prot        /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page */
	depd            \pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ */
	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handles cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
	.endm
573
	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions (iitlba/iitlbp, 32-bit forms).
	 * The "extru,=" tests nullify the following depi when the
	 * corresponding pte bit is clear. */
	.macro		make_insert_tlb_11	spc,pte,prot
	zdep		\spc,30,15,\prot
	dep		\pte,8,7,\prot
	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi		1,12,1,\prot
	extru,=         \pte,_PAGE_USER_BIT,1,%r0
	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi		_PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
	extru		\pte,24,25,\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case.  The addi,<> nullifies the sign-extension
	 * unless the extracted nibble was 0xf (\tmp == -1). */
	.macro		f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0
	extrd,s		\pte,63,25,\pte
	.endm
602
	/* The alias region is an 8MB aligned 16MB to do clear and
	 * copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the to TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the from tlb entry (or nothing if only a to entry---for
	 * clear_user_page_asm)
	 *
	 * Faults out unless \spc == 0 (kernel) and \va lies within
	 * TMPALIAS_MAP_START's 16MB region; otherwise builds a
	 * dirty+write+read \prot and picks %r23 or %r26 as \pte
	 * depending on which half of the region \va falls in. */
	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault
	cmpib,COND(<>),n 0,\spc,\fault
	ldil		L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi		0,31,32,\tmp
#endif
	copy		\va,\tmp1
	DEPI		0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
	depd,z		\prot,8,7,\prot
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte	/* "tr" always executes; next insn is nullified unless the extr above nullified this one */
	or		%r26,%r0,\pte
	.endm
636
637
638	/*
639	 * Align fault_vector_20 on 4K boundary so that both
640	 * fault_vector_11 and fault_vector_20 are on the
641	 * same page. This is only necessary as long as we
642	 * write protect the kernel text, which we may stop
643	 * doing once we use large page translations to cover
644	 * the static part of the kernel address space.
645	 */
646
	.text

	.align 4096

	/* PA 2.0 interruption vector table: 32 slots of 32 bytes each.
	 * Slot 0 is architecturally unused (filler string below); slots
	 * with dedicated handlers are HPMC (1), external interrupt (4),
	 * itlb miss (6), dtlb miss (15), non-access dtlb miss (17) and
	 * TLB dirty bit (20).  Everything else goes through "def" to
	 * handle_interruption with the trap number in %r8. */
ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4	/* external interrupt */
	def		 5
	itlb_20		 6	/* instruction TLB miss */
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15	/* data TLB miss */
	def             16
	nadtlb_20	17	/* non-access data TLB miss */
	def		18
	def		19
	dbit_20		20	/* TLB dirty bit trap */
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)
689
#ifndef CONFIG_64BIT

	.align 2048

	/* PA 1.1 interruption vector table (32-bit kernels only).
	 * Same layout as fault_vector_20, using the pa11 TLB-miss
	 * macros. */
ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4	/* external interrupt */
	def		 5
	itlb_11		 6	/* instruction TLB miss */
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15	/* data TLB miss */
	def             16
	nadtlb_11	17	/* non-access data TLB miss */
	def		18
	def		19
	dbit_11		20	/* TLB dirty bit trap */
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
734
735	.import		handle_interruption,code
736	.import		do_cpu_irq_mask,code
737
738	/*
739	 * r26 = function to be called
740	 * r25 = argument to pass in
741	 * r24 = flags for do_fork()
742	 *
743	 * Kernel threads don't ever return, so they don't need
744	 * a true register context. We just save away the arguments
745	 * for copy_thread/ret_ to properly set up the child.
746	 */
747
#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
#define CLONE_UNTRACED 0x00800000

	.import do_fork
	/* Create a kernel thread: stash the function (%r26), its argument
	 * (%r25) and extra clone flags (%r24) in a pt_regs frame pushed on
	 * the current stack, then call do_fork with CLONE_VM|CLONE_UNTRACED
	 * forced on.  copy_thread later moves these values into the child. */
ENTRY(__kernel_thread)
	STREG	%r2, -RP_OFFSET(%r30)

	copy	%r30, %r1
	ldo	PT_SZ_ALGN(%r30),%r30
#ifdef CONFIG_64BIT
	/* Yo, function pointers in wide mode are little structs... -PB */
	ldd	24(%r26), %r2
	STREG	%r2, PT_GR27(%r1)	/* Store childs %dp */
	ldd	16(%r26), %r26

	STREG	%r22, PT_GR22(%r1)	/* save r22 (arg5) */
	copy	%r0, %r22		/* user_tid */
#endif
	STREG	%r26, PT_GR26(%r1)  /* Store function & argument for child */
	STREG	%r25, PT_GR25(%r1)
	ldil	L%CLONE_UNTRACED, %r26
	ldo	CLONE_VM(%r26), %r26   /* Force CLONE_VM since only init_mm */
	or	%r26, %r24, %r26      /* will have kernel mappings.	 */
	ldi	1, %r25			/* stack_start, signals kernel thread */
	stw	%r0, -52(%r30)	     	/* user_tid */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	do_fork, %r2
	copy	%r1, %r24		/* pt_regs */

	/* Parent Returns here */

	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
	ldo	-PT_SZ_ALGN(%r30), %r30
	bv	%r0(%r2)
	nop
ENDPROC(__kernel_thread)
786
787	/*
788	 * Child Returns here
789	 *
790	 * copy_thread moved args from temp save area set up above
791	 * into task save area.
792	 */
793
	/* Entry point for a newly created kernel thread: runs
	 * schedule_tail, loads the thread function/argument saved by
	 * __kernel_thread from the task's pt_regs, calls the function,
	 * and exits via sys_exit(0) when it returns. */
ENTRY(ret_from_kernel_thread)

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26		/* thread argument */
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27		/* child %dp (data pointer) */
	LDREG	TASK_PT_GR22(%r1), %r22
#endif
	LDREG	TASK_PT_GR26(%r1), %r1		/* thread function */
	ble	0(%sr7, %r1)
	copy	%r31, %r2			/* delay slot: set return pointer */

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
	loadgp				/* Thread could have been in a module */
#endif
#ifndef CONFIG_64BIT
	b	sys_exit
#else
	load32	sys_exit, %r1
	bv	%r0(%r1)
#endif
	ldi	0, %r26			/* delay slot: exit code 0 */
ENDPROC(ret_from_kernel_thread)
822
	.import	sys_execve, code
	/* Kernel-internal execve: pushes a pt_regs frame, stores the
	 * three arguments there, and calls sys_execve.  On success
	 * (%r28 == 0) we leave via intr_return to start the new image;
	 * on failure we unwind the frame and return to the caller. */
ENTRY(__execve)
	copy	%r2, %r15		/* save return pointer */
	copy	%r30, %r16		/* %r16 = base of new pt_regs frame */
	ldo	PT_SZ_ALGN(%r30), %r30
	STREG	%r26, PT_GR26(%r16)
	STREG	%r25, PT_GR25(%r16)
	STREG	%r24, PT_GR24(%r16)
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	sys_execve, %r2
	copy	%r16, %r26		/* delay slot: arg0 = pt_regs */

	cmpib,=,n 0,%r28,intr_return    /* forward */

	/* yes, this will trap and die. */
	copy	%r15, %r2
	copy	%r16, %r30
	bv	%r0(%r2)
	nop
ENDPROC(__execve)
845
846
847	/*
848	 * struct task_struct *_switch_to(struct task_struct *prev,
849	 *	struct task_struct *next)
850	 *
851	 * switch kernel stacks and return prev */
	/* Context switch: saves callee-save integer and FP state and the
	 * kernel stack/continuation of "prev" (%r26) into its thread
	 * struct, loads the same from "next" (%r25), points %cr30 at
	 * next's thread_info, and resumes next at its saved KPC
	 * (normally _switch_to_ret below).  Returns prev in %r28. */
ENTRY(_switch_to)
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)	/* where prev resumes later */
	LDREG	TASK_PT_KPC(%r25), %r2	/* where next resumes now */

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl   %r25,%cr30		/* delay slot: cr30 = next thread_info */

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28		/* delay slot: return prev */
ENDPROC(_switch_to)
878
879
880	.align 4096
881
	/* Common return-to-user / return-from-interruption path.
	 *
	 * syscall_exit_rfi: sanitize the saved IAOQ (force privilege
	 * level 3) and PSW before returning to user space, then fall
	 * through to intr_return.  intr_return and friends handle
	 * reschedule, signal delivery, kernel preemption and the final
	 * rfi.  intr_extint at the bottom is the external-interrupt
	 * entry from the vector table. */
ENTRY(syscall_exit_rfi)
	mfctl   %cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also Filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19		/* force privilege level 3 */
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)
	mfsp    %sr3,%r19
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)

intr_return:
	/* NOTE: Need to enable interrupts in case we schedule. */
	ssm     PSW_SM_I, %r0

intr_check_resched:

	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20
	and,COND(<>)	%r19, %r20, %r0	/* nullifies the branch when work is pending */
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	CMPIB=,n 0,%r20,intr_restore /* backward */
	LDREG	PT_IASQ1(%r16), %r20
	CMPIB=,n 0,%r20,intr_restore /* backward */

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig			/* re-check flags after handling */

intr_restore:
	copy            %r16,%r29
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1       %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	CMPIB=	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	CMPIB=	0, %r20, intr_do_preempt
	nop

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* schedule() returns to intr_check_sig (set up as return pointer). */
	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2	/* delay slot */

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	CMPIB<>	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

	/*
	 * External interrupts.  %r16 holds %sr7 (loaded in the extint
	 * vector's delay slot): zero means we were already in the kernel.
	 */

intr_extint:
	CMPIB=,n 0,%r16,1f		/* kernel stack already? */
	get_stack_use_cr30		/* no: switch to kernel stack */
	b,n 3f

1:
2:
	get_stack_use_r30		/* yes: push pt_regs on it */

3:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC(syscall_exit_rfi)
1091
1092
1093	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1094
	/* Generic interruption entry (target of the "def" vector macro).
	 * On entry %r8 holds the trap number (shadowed), %sr7 tells us
	 * whether we came from user space.  Saves full state, captures
	 * ISR/IOR for faults, switches to virtual mode and calls
	 * handle_interruption(code, regs); returns via intr_check_sig. */
ENTRY(intr_save)		/* for os_hpmc */
	mfsp    %sr7,%r16
	CMPIB=,n 0,%r16,1f		/* already on a kernel stack? */
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26		/* delay slot: code -> non-shadowed %r26 */

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is a itlb miss, skip saving/adjusting isr/ior */


	CMPIB=,n        6,%r26,skip_save_ior


	mfctl           %cr20, %r16 /* isr */
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %cr21, %r17 /* ior */


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17


	/* adjust isr/ior. */
	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)


skip_save_ior:
	virt_map
	save_general	%r29

	ldo		PT_FR0(%r29), %r25
	save_fp		%r25

	loadgp

	copy		%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo		-16(%r30),%r29	/* Reference param save area */
#endif

	/* handle_interruption returns to intr_check_sig. */
	ldil		L%intr_check_sig, %r2
	copy		%r25, %r16	/* save pt_regs */

	b		handle_interruption
	ldo		R%intr_check_sig(%r2), %r2	/* delay slot */
ENDPROC(intr_save)
1159
1160
1161	/*
1162	 * Note for all tlb miss handlers:
1163	 *
1164	 * cr24 contains a pointer to the kernel address space
1165	 * page directory.
1166	 *
1167	 * cr25 contains a pointer to the current user address
1168	 * space page directory.
1169	 *
1170	 * sr3 will contain the space id of the user address space
1171	 * of the current running thread while that thread is
1172	 * running in the kernel.
1173	 */
1174
1175	/*
1176	 * register number allocations.  Note that these are all
1177	 * in the shadowed registers
1178	 */
1179
	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */
1187
1188#ifdef CONFIG_64BIT
1189
/*
 * 64-bit (wide-mode) data TLB miss handler.  Walks the 3-level page
 * table and inserts the translation with idtlbt, then returns to the
 * interrupted context with rfir.  Only shadowed registers are used
 * (see the alias list above), so nothing needs to be saved.
 */
dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault	/* wrong space -> full fault path */

	/* 3-level walk; branches out if no valid PTE is present */
	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	/* update the PTE (macro defined earlier in this file) */
	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot

	rfir
	nop

/*
 * No PTE: check for a fault in the temporary alias region
 * (do_alias branches to dtlb_fault if it is not an alias fault).
 */
dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt          pte,prot

	rfir
	nop

/*
 * Non-access data TLB miss, wide mode (raised by fdc/fic/pdc/lpa/
 * probe-class instructions -- see the long comment at nadtlb_emulate).
 */
nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_flush_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot

	rfir
	nop

/*
 * PTE absent: if the page is not marked "flush only", fall back to
 * emulating the faulting instruction; otherwise insert a flush-only
 * translation so the flush can proceed.
 */
nadtlb_check_flush_20w:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,52,pte
	idtlbt          pte,prot

	rfir
	nop
1246
1247#else
1248
/*
 * PA 1.1 data TLB miss handler.  2-level page table walk; PA 1.1
 * uses separate idtlba/idtlbp inserts, which require the space id
 * to be live in %sr1 for the duration of the insert.
 */
dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

/*
 * No PTE: if this is a kernel-space fault inside the temporary
 * alias region, hand-build a dirty/write/read translation for the
 * alias page; anything else goes to the full dtlb_fault path.
 */
dtlb_check_alias_11:

	/* Check to see if fault is in the temporary alias region */

	cmpib,<>,n      0,spc,dtlb_fault /* forward */
	ldil            L%(TMPALIAS_MAP_START),t0
	copy            va,t1
	depwi           0,31,23,t1	/* mask va down to its 8MB region base */
	cmpb,<>,n       t0,t1,dtlb_fault /* forward */
	ldi             (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
	depw,z          prot,8,7,prot

	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */

	extrw,u,=       va,9,1,r0
	or,tr           %r23,%r0,pte    /* If "from" use "from" page */
	or              %r26,%r0,pte    /* else "to", use "to" page  */

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

/* Non-access data TLB miss, PA 1.1 (see comment at nadtlb_emulate) */
nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot


	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

/*
 * PTE absent on a non-access miss: emulate the instruction unless
 * the page is marked "flush only", in which case insert a
 * flush-only translation (32-bit variant of the 20w path above).
 */
nadtlb_check_flush_11:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	zdepi           7,7,3,prot
	depi            1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlba */

	depi		0,31,12,pte
	extru		pte,24,25,pte

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1344
/*
 * PA 2.0 narrow-mode data TLB miss handler.  2-level walk, then a
 * combined idtlbt insert; f_extend adjusts the pte for the wide
 * insert (macro defined elsewhere in this file).
 */
dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt          pte,prot

	rfir
	nop

/* No PTE: temporary-alias check, else fall through to dtlb_fault */
dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt          pte,prot

	rfir
	nop
1370
/*
 * Non-access data TLB miss, PA 2.0 narrow mode (raised by
 * fdc/fic/pdc/lpa/probe-class instructions -- see the comment at
 * nadtlb_emulate).  2-level walk; an absent PTE goes to the
 * flush-only / emulation path instead of faulting.
 */
nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	/* f_extend: presumably widens pte for the PA2.0 insert --
	 * macro defined elsewhere in this file */
	f_extend	pte,t0

	idtlbt          pte,prot

	rfir
	nop
1388
/*
 * PTE absent on a PA 2.0 non-access miss: emulate the instruction
 * unless the page is "flush only", in which case insert a
 * flush-only translation (narrow-mode variant: 32-bit page addr).
 */
nadtlb_check_flush_20:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,32,pte
	idtlbt          pte,prot

	rfir
	nop
1405#endif
1406
nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	/* get_register returns the named register's value in %r1,
	 * or -1 if the register is shadowed (see get_register below) */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
	/* emulate base-register modification: base += index */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

/*
 * Set the PSW N (nullify) bit in the interrupted context so the
 * faulting instruction is skipped when we rfir back to it.
 */
nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop
1452
1453	/*
1454		When there is no translation for the probe address then we
1455		must nullify the insn and return zero in the target regsiter.
1456		This will indicate to the calling code that it does not have
1457		write/read privileges to this address.
1458
1459		This should technically work for prober and probew in PA 1.1,
1460		and also probe,r and probe,w in PA 2.0
1461
1462		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1463		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1464
1465	*/
1466nadtlb_probe_check:
1467	ldi             0x80,%r16
1468	and             %r9,%r16,%r17
1469	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1470	BL              get_register,%r25      /* Find the target register */
1471	extrw,u         %r9,31,5,%r8           /* Get target register */
1472	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
1473	BL		set_register,%r25
1474	copy            %r0,%r1                /* Write zero to target register */
1475	b nadtlb_nullify                       /* Nullify return insn */
1476	nop
1477
1478
1479#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	/* 3-level walk; no alias handling on the instruction side,
	 * an absent PTE goes straight to itlb_fault */
	L3_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt          pte,prot

	rfir
	nop
1501
1502#else
1503
/*
 * PA 1.1 instruction TLB miss: 2-level walk, separate iitlba/iitlbp
 * inserts with the faulting space id temporarily loaded into %sr1.
 */
itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

/*
 * PA 2.0 narrow-mode instruction TLB miss: 2-level walk, combined
 * iitlbt insert after f_extend adjusts the pte.
 */
itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt          pte,prot

	rfir
	nop
1543
1544#endif
1545
1546#ifdef CONFIG_64BIT
1547
/*
 * 64-bit data TLB dirty-bit trap handler: sets the dirty state in
 * the PTE (update_dirty) and re-inserts the translation.  On SMP
 * the PTE update is serialized with pa_dbit_lock via LDCW; the
 * lock is skipped for kernel faults (spc == 0).
 */
dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nolock_20w
	load32		PA(pa_dbit_lock),t0

dbit_spin_20w:
	LDCW		0(t0),t1
	cmpib,=         0,t1,dbit_spin_20w	/* spin while lock word is 0 (held) */
	nop

dbit_nolock_20w:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot
#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nounlock_20w
	ldi             1,t1
	stw             t1,0(t0)	/* release pa_dbit_lock */

dbit_nounlock_20w:
#endif

	rfir
	nop
1581#else
1582
/*
 * PA 1.1 data TLB dirty-bit trap handler: same structure as the
 * 20w variant, but with split idtlba/idtlbp inserts through %sr1.
 * Note t1 (not t0) holds the saved %sr1 here, since t0 holds the
 * pa_dbit_lock address on SMP.
 */
dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nolock_11
	load32		PA(pa_dbit_lock),t0

dbit_spin_11:
	LDCW		0(t0),t1
	cmpib,=         0,t1,dbit_spin_11	/* spin while lock word is 0 (held) */
	nop

dbit_nolock_11:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */
#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nounlock_11
	ldi             1,t1
	stw             t1,0(t0)	/* release pa_dbit_lock */

dbit_nounlock_11:
#endif

	rfir
	nop
1623
/*
 * PA 2.0 narrow-mode data TLB dirty-bit trap handler: sets the
 * dirty state in the PTE (update_dirty) and re-inserts the
 * translation with idtlbt.  On SMP the PTE update is serialized
 * with pa_dbit_lock, skipped for kernel faults (spc == 0).
 */
dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nolock_20
	load32		PA(pa_dbit_lock),t0

dbit_spin_20:
	LDCW		0(t0),t1
	cmpib,=         0,t1,dbit_spin_20	/* spin while lock word is 0 (held) */
	nop

dbit_nolock_20:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t1

	idtlbt          pte,prot

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nounlock_20
	ldi             1,t1
	stw             t1,0(t0)	/* release pa_dbit_lock */

dbit_nounlock_20:
#endif

	rfir
	nop
1660#endif
1661
1662	.import handle_interruption,code
1663
/*
 * Slow-path fault stubs.  Each loads an interruption code into %r8
 * (the ldi executes in the branch delay slot) and enters the common
 * intr_save path, which hands off to handle_interruption in C.
 */
kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8

itlb_fault:
	b               intr_save
	ldi             6,%r8	/* 6 = itlb miss (see intr_save's skip_save_ior test) */

nadtlb_fault:
	b               intr_save
	ldi             17,%r8

dtlb_fault:
	b               intr_save
	ldi             15,%r8
1683
1684	/* Register saving semantics for system calls:
1685
1686	   %r1		   clobbered by system call macro in userspace
1687	   %r2		   saved in PT_REGS by gateway page
1688	   %r3  - %r18	   preserved by C code (saved by signal code)
1689	   %r19 - %r20	   saved in PT_REGS by gateway page
1690	   %r21 - %r22	   non-standard syscall args
1691			   stored in kernel stack by gateway page
1692	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
1693	   %r27 - %r30	   saved in PT_REGS by gateway page
1694	   %r31		   syscall return pointer
1695	 */
1696
1697
	/*
	 * reg_save: store the C-callee-saved registers r3-r18 into the
	 * pt_regs pointed to by \regs.  Used by the syscall wrappers
	 * before calling into C, so the full register set is available
	 * for the child / signal handler.
	 */
	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm

	/*
	 * reg_restore: reload r3-r18 from the pt_regs pointed to by
	 * \regs (inverse of reg_save).
	 */
	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm
1735
/*
 * fork() wrapper: saves the callee-saved registers and %cr27 into
 * the task's pt_regs, then implements fork as
 * sys_clone(SIGCHLD, regs->gr[30], regs) -- i.e. clone with the
 * parent's user stack pointer.  Returns through wrapper_exit,
 * which is shared with the clone/vfork wrappers and child_return.
 */
ENTRY(sys_fork_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	/* %r1 = task's pt_regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)	/* save return pointer */
	ldo	FRAME_SIZE(%r30),%r30	/* push a stack frame */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* These are call-clobbered registers and therefore
	   also syscall-clobbered (we hope). */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	LDREG	PT_GR30(%r1),%r25	/* arg1 = user sp */
	copy	%r1,%r24		/* arg2 = pt_regs */
	BL	sys_clone,%r2
	ldi	SIGCHLD,%r26		/* arg0 = SIGCHLD (delay slot) */

	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2

/* common exit for the fork/clone/vfork wrappers and child_return */
wrapper_exit:
	ldo	-FRAME_SIZE(%r30),%r30		/* get the stackframe */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1

	/* strace expects syscall # to be preserved in r20 */
	ldi	__NR_fork,%r20
	bv %r0(%r2)
	STREG	%r20,PT_GR20(%r1)
ENDPROC(sys_fork_wrapper)
1774
1775	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop

	/* NOTE(review): the extra FRAME_SIZE relative to the parent
	 * path suggests the child starts with one additional frame on
	 * its kernel stack -- confirm against where the child stack is
	 * set up (copy_thread).  %r2 is reloaded from the GR19 slot
	 * saved by the parent wrapper, so we exit through wrapper_exit
	 * with return value 0 in %r28. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR19(%r1),%r2
	b	wrapper_exit
	copy	%r0,%r28		/* child returns 0 (delay slot) */
ENDPROC(child_return)
1785
1786
/*
 * clone() wrapper: saves callee-saved registers and %cr27 into
 * pt_regs, passes the pt_regs pointer as an extra argument in
 * %r24, and calls sys_clone.  User-supplied arguments in
 * %r26/%r25 are left untouched.  Exits via wrapper_exit (defined
 * in sys_fork_wrapper above).
 */
ENTRY(sys_clone_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)	/* save return pointer */
	ldo	FRAME_SIZE(%r30),%r30	/* push a stack frame */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* WARNING - Clobbers r19 and r21, userspace must save these! */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)
	BL	sys_clone,%r2
	copy	%r1,%r24		/* extra arg = pt_regs (delay slot) */

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
ENDPROC(sys_clone_wrapper)
1809
1810
/*
 * vfork() wrapper: saves callee-saved registers and %cr27 into
 * pt_regs and calls sys_vfork with the pt_regs pointer as its
 * first argument.  Exits via wrapper_exit (defined in
 * sys_fork_wrapper above).
 */
ENTRY(sys_vfork_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)	/* save return pointer */
	ldo	FRAME_SIZE(%r30),%r30	/* push a stack frame */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	BL	sys_vfork,%r2
	copy	%r1,%r26		/* arg0 = pt_regs (delay slot) */

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
ENDPROC(sys_vfork_wrapper)
1833
1834
	/*
	 * execve_wrapper: call \execve with the task's pt_regs pointer
	 * as arg0 (the user-visible arguments are already in the saved
	 * pt_regs).  Both the success and error paths return through
	 * %r19, which holds the caller's rp.
	 */
	.macro  execve_wrapper execve
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	/*
	 * Do we need to save/restore r3-r18 here?
	 * I don't think so. why would new thread need old
	 * threads registers?
	 */

	/* %arg0 - %arg3 are already saved for us. */

	STREG %r2,-RP_OFFSET(%r30)
	ldo FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL \execve,%r2
	copy %r1,%arg0			/* arg0 = pt_regs (delay slot) */

	ldo -FRAME_SIZE(%r30),%r30
	LDREG -RP_OFFSET(%r30),%r2

	/* If exec succeeded we need to load the args */

	/* -1024 <= %r28: errno-range return value -> error path.
	 * The delay-slot copy runs on both paths, so either way we
	 * return via %r19 (= saved rp). */
	ldo -1024(%r0),%r1
	cmpb,>>= %r28,%r1,error_\execve
	copy %r2,%r19

error_\execve:
	bv %r0(%r19)
	nop
	.endm

	.import sys_execve
ENTRY(sys_execve_wrapper)
	execve_wrapper sys_execve
ENDPROC(sys_execve_wrapper)

#ifdef CONFIG_64BIT
	.import sys32_execve
/* 32-bit-compat execve entry on a 64-bit kernel */
ENTRY(sys32_execve_wrapper)
	execve_wrapper sys32_execve
ENDPROC(sys32_execve_wrapper)
#endif
1880
/*
 * rt_sigreturn wrapper: calls sys_rt_sigreturn(regs) and then
 * restores r3-r18 and %r28 from pt_regs (the C code rewrote them
 * from the signal context, so we deliberately do NOT reg_save
 * first).
 */
ENTRY(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30	/* frame push in the delay slot */
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC(sys_rt_sigreturn_wrapper)
1909
/*
 * sigaltstack wrapper: the C helper needs the user stack pointer,
 * which only the saved pt_regs has, so fetch pt_regs->gr[30] and
 * pass it as the extra argument in %r24.
 */
ENTRY(sys_sigaltstack_wrapper)
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
	LDREG	TASK_PT_GR30(%r24),%r24	/* %r24 = user sp */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	do_sigaltstack,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	do_sigaltstack,%r2
	ldo	FRAME_SIZE(%r30), %r30	/* frame push in the delay slot */
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
ENDPROC(sys_sigaltstack_wrapper)

#ifdef CONFIG_64BIT
/* 32-bit-compat sigaltstack on a 64-bit kernel */
ENTRY(sys32_sigaltstack_wrapper)
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30), %r30
	BL	do_sigaltstack32,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
ENDPROC(sys32_sigaltstack_wrapper)
#endif
1947
/*
 * Common syscall exit path: stores the return value, loops on
 * reschedule/signal work, then restores user state.  Untraced
 * tasks return with a fast branch-external; traced tasks (or those
 * entered with the D bit clear) go through syscall_restore_rfi,
 * which rebuilds pt_regs and exits via intr_restore/rfi so the PSW
 * T and R single/block-step bits can be set.
 */
ENTRY(syscall_exit)
	/* NOTE: HP-UX syscalls also come through here
	 * after hpux_syscall_exit fixes up return
	 * values. */

	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl     %cr30, %r1
	LDREG     TI_TASK(%r1),%r1
	STREG     %r28,TASK_PT_GR28(%r1)

#ifdef CONFIG_HPUX
/* <linux/personality.h> cannot be easily included */
#define PER_HPUX 0x10
	ldw	TASK_PERSONALITY(%r1),%r19

	/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
	ldo	  -PER_HPUX(%r19), %r19
	CMPIB<>,n 0,%r19,1f

	/* Save other hpux returns if personality is PER_HPUX */
	STREG     %r22,TASK_PT_GR22(%r1)
	STREG     %r29,TASK_PT_GR29(%r1)
1:

#endif /* CONFIG_HPUX */

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
	/* the and,<> nullifies the branch when signal work is pending,
	 * so we fall through into syscall_do_signal */
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-saved registers so the signal code sees a full
	 * register set, then call do_notify_resume(regs, 1) */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n     syscall_check_sig		/* recheck for more work */

syscall_restore:
	/* Are we being ptraced? */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	ldw	TASK_PTRACE(%r1), %r19
	bb,<	%r19,31,syscall_restore_rfi	/* traced -> slow rfi exit */
	nop

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	rsm     PSW_SM_I, %r0			/* interrupts off while sp/sr7 disagree */
	LDREG   TASK_PT_GR30(%r1),%r30             /* restore user sp */
	mfsp	%sr3,%r1			   /* Get users space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n    0(%sr3,%r31)                       /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
	 * set in include/linux/ptrace.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,PA_SINGLESTEP_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

pt_regs_ok:
	/* Set the interruption address queue to the user return
	 * address and its successor, then leave via the common
	 * interrupt restore path. */
	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2			   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	copy	%r25,%r16			   /* intr_restore expects pt_regs in %r16 */
	b	intr_restore
	nop

	.import schedule,code
syscall_do_resched:
	BL	schedule,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b	syscall_check_resched	/* if resched, we start over again */
	nop
ENDPROC(syscall_exit)
2153
2154
get_register:
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 *
	 * In:  %r8  = register number (0-31), %r25 = return address
	 * Out: %r1  = register value, or -1 if the register is shadowed
	 *
	 * Implemented as a branch table: blr jumps to entry %r8, each
	 * entry is a bv back to %r25 with the copy in the delay slot.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1
2232
2233
set_register:
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 *
	 * In:  %r8 = target register number (0-31), %r1 = value,
	 *      %r25 = return address
	 *
	 * Branch table mirroring get_register; note the caller is
	 * responsible for not targeting shadowed registers (they are
	 * overwritten by rfir).
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy    %r1,%r0
	bv      %r0(%r25)    /* r1 */
	copy    %r1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31
2306