1/* $Id: ultra.S,v 1.1.1.1 2007-08-03 18:52:19 $
2 * ultra.S: Don't expand these all over the place...
3 *
4 * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
5 */
6
7#include <asm/asi.h>
8#include <asm/pgtable.h>
9#include <asm/page.h>
10#include <asm/spitfire.h>
11#include <asm/mmu_context.h>
12#include <asm/mmu.h>
13#include <asm/pil.h>
14#include <asm/head.h>
15#include <asm/thread_info.h>
16#include <asm/cacheflush.h>
17#include <asm/hypervisor.h>
18
19	/* Basically, most of the Spitfire vs. Cheetah madness
20	 * has to do with the fact that Cheetah does not support
21	 * IMMU flushes out of the secondary context.  Someone needs
22	 * to throw a south lake birthday party for the folks
23	 * in Microelectronics who refused to fix this shit.
24	 */
25
	/* This file is meant to be read efficiently by the CPU, not humans.
	 * Take care not to break this for anybody...
	 */
29	.text
30	.align		32
31	.globl		__flush_tlb_mm
32__flush_tlb_mm:		/* 18 insns */
33	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
34	ldxa		[%o1] ASI_DMMU, %g2
35	cmp		%g2, %o0
36	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
37	 mov		0x50, %g3
38	stxa		%g0, [%g3] ASI_DMMU_DEMAP
39	stxa		%g0, [%g3] ASI_IMMU_DEMAP
40	sethi		%hi(KERNBASE), %g3
41	flush		%g3
42	retl
43	 nop
44	nop
45	nop
46	nop
47	nop
48	nop
49	nop
50	nop
51	nop
52	nop
53
54	.align		32
55	.globl		__flush_tlb_pending
56__flush_tlb_pending:	/* 26 insns */
57	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
58	rdpr		%pstate, %g7
59	sllx		%o1, 3, %o1
60	andn		%g7, PSTATE_IE, %g2
61	wrpr		%g2, %pstate
62	mov		SECONDARY_CONTEXT, %o4
63	ldxa		[%o4] ASI_DMMU, %g2
64	stxa		%o0, [%o4] ASI_DMMU
651:	sub		%o1, (1 << 3), %o1
66	ldx		[%o2 + %o1], %o3
67	andcc		%o3, 1, %g0
68	andn		%o3, 1, %o3
69	be,pn		%icc, 2f
70	 or		%o3, 0x10, %o3
71	stxa		%g0, [%o3] ASI_IMMU_DEMAP
722:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
73	membar		#Sync
74	brnz,pt		%o1, 1b
75	 nop
76	stxa		%g2, [%o4] ASI_DMMU
77	sethi		%hi(KERNBASE), %o4
78	flush		%o4
79	retl
80	 wrpr		%g7, 0x0, %pstate
81	nop
82	nop
83	nop
84	nop
85
86	.align		32
87	.globl		__flush_tlb_kernel_range
88__flush_tlb_kernel_range:	/* 16 insns */
89	/* %o0=start, %o1=end */
90	cmp		%o0, %o1
91	be,pn		%xcc, 2f
92	 sethi		%hi(PAGE_SIZE), %o4
93	sub		%o1, %o0, %o3
94	sub		%o3, %o4, %o3
95	or		%o0, 0x20, %o0		! Nucleus
961:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
97	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
98	membar		#Sync
99	brnz,pt		%o3, 1b
100	 sub		%o3, %o4, %o3
1012:	sethi		%hi(KERNBASE), %o3
102	flush		%o3
103	retl
104	 nop
105	nop
106
107__spitfire_flush_tlb_mm_slow:
108	rdpr		%pstate, %g1
109	wrpr		%g1, PSTATE_IE, %pstate
110	stxa		%o0, [%o1] ASI_DMMU
111	stxa		%g0, [%g3] ASI_DMMU_DEMAP
112	stxa		%g0, [%g3] ASI_IMMU_DEMAP
113	flush		%g6
114	stxa		%g2, [%o1] ASI_DMMU
115	sethi		%hi(KERNBASE), %o1
116	flush		%o1
117	retl
118	 wrpr		%g1, 0, %pstate
119
120/*
121 * The following code flushes one page_size worth.
122 */
123#if (PAGE_SHIFT == 13)
124#define ITAG_MASK 0xfe
125#elif (PAGE_SHIFT == 16)
126#define ITAG_MASK 0x7fe
127#else
128#error unsupported PAGE_SIZE
129#endif
130	.section .kprobes.text, "ax"
131	.align		32
132	.globl		__flush_icache_page
133__flush_icache_page:	/* %o0 = phys_page */
134	membar		#StoreStore
135	srlx		%o0, PAGE_SHIFT, %o0
136	sethi		%uhi(PAGE_OFFSET), %g1
137	sllx		%o0, PAGE_SHIFT, %o0
138	sethi		%hi(PAGE_SIZE), %g2
139	sllx		%g1, 32, %g1
140	add		%o0, %g1, %o0
1411:	subcc		%g2, 32, %g2
142	bne,pt		%icc, 1b
143	 flush		%o0 + %g2
144	retl
145	 nop
146
147#ifdef DCACHE_ALIASING_POSSIBLE
148
149#if (PAGE_SHIFT != 13)
150#error only page shift of 13 is supported by dcache flush
151#endif
152
153#define DTAG_MASK 0x3
154
155	/* This routine is Spitfire specific so the hardcoded
156	 * D-cache size and line-size are OK.
157	 */
158	.align		64
159	.globl		__flush_dcache_page
160__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
161	sethi		%uhi(PAGE_OFFSET), %g1
162	sllx		%g1, 32, %g1
163	sub		%o0, %g1, %o0			! physical address
164	srlx		%o0, 11, %o0			! make D-cache TAG
165	sethi		%hi(1 << 14), %o2		! D-cache size
166	sub		%o2, (1 << 5), %o2		! D-cache line size
1671:	ldxa		[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
168	andcc		%o3, DTAG_MASK, %g0		! Valid?
169	be,pn		%xcc, 2f			! Nope, branch
170	 andn		%o3, DTAG_MASK, %o3		! Clear valid bits
171	cmp		%o3, %o0			! TAG match?
172	bne,pt		%xcc, 2f			! Nope, branch
173	 nop
174	stxa		%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
175	membar		#Sync
1762:	brnz,pt		%o2, 1b
177	 sub		%o2, (1 << 5), %o2		! D-cache line size
178
179	/* The I-cache does not snoop local stores so we
180	 * better flush that too when necessary.
181	 */
182	brnz,pt		%o1, __flush_icache_page
183	 sllx		%o0, 11, %o0
184	retl
185	 nop
186
187#endif /* DCACHE_ALIASING_POSSIBLE */
188
189	.previous
190
191	/* Cheetah specific versions, patched at boot time. */
192__cheetah_flush_tlb_mm: /* 19 insns */
193	rdpr		%pstate, %g7
194	andn		%g7, PSTATE_IE, %g2
195	wrpr		%g2, 0x0, %pstate
196	wrpr		%g0, 1, %tl
197	mov		PRIMARY_CONTEXT, %o2
198	mov		0x40, %g3
199	ldxa		[%o2] ASI_DMMU, %g2
200	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o1
201	sllx		%o1, CTX_PGSZ1_NUC_SHIFT, %o1
202	or		%o0, %o1, %o0	/* Preserve nucleus page size fields */
203	stxa		%o0, [%o2] ASI_DMMU
204	stxa		%g0, [%g3] ASI_DMMU_DEMAP
205	stxa		%g0, [%g3] ASI_IMMU_DEMAP
206	stxa		%g2, [%o2] ASI_DMMU
207	sethi		%hi(KERNBASE), %o2
208	flush		%o2
209	wrpr		%g0, 0, %tl
210	retl
211	 wrpr		%g7, 0x0, %pstate
212
213__cheetah_flush_tlb_pending:	/* 27 insns */
214	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
215	rdpr		%pstate, %g7
216	sllx		%o1, 3, %o1
217	andn		%g7, PSTATE_IE, %g2
218	wrpr		%g2, 0x0, %pstate
219	wrpr		%g0, 1, %tl
220	mov		PRIMARY_CONTEXT, %o4
221	ldxa		[%o4] ASI_DMMU, %g2
222	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
223	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
224	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
225	stxa		%o0, [%o4] ASI_DMMU
2261:	sub		%o1, (1 << 3), %o1
227	ldx		[%o2 + %o1], %o3
228	andcc		%o3, 1, %g0
229	be,pn		%icc, 2f
230	 andn		%o3, 1, %o3
231	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2322:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
233	membar		#Sync
234	brnz,pt		%o1, 1b
235	 nop
236	stxa		%g2, [%o4] ASI_DMMU
237	sethi		%hi(KERNBASE), %o4
238	flush		%o4
239	wrpr		%g0, 0, %tl
240	retl
241	 wrpr		%g7, 0x0, %pstate
242
243#ifdef DCACHE_ALIASING_POSSIBLE
244__cheetah_flush_dcache_page: /* 11 insns */
245	sethi		%uhi(PAGE_OFFSET), %g1
246	sllx		%g1, 32, %g1
247	sub		%o0, %g1, %o0
248	sethi		%hi(PAGE_SIZE), %o4
2491:	subcc		%o4, (1 << 5), %o4
250	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
251	membar		#Sync
252	bne,pt		%icc, 1b
253	 nop
254	retl		/* I-cache flush never needed on Cheetah, see callers. */
255	 nop
256#endif /* DCACHE_ALIASING_POSSIBLE */
257
258	/* Hypervisor specific versions, patched at boot time.  */
259__hypervisor_tlb_tl0_error:
260	save		%sp, -192, %sp
261	mov		%i0, %o0
262	call		hypervisor_tlbop_error
263	 mov		%i1, %o1
264	ret
265	 restore
266
267__hypervisor_flush_tlb_mm: /* 10 insns */
268	mov		%o0, %o2	/* ARG2: mmu context */
269	mov		0, %o0		/* ARG0: CPU lists unimplemented */
270	mov		0, %o1		/* ARG1: CPU lists unimplemented */
271	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
272	mov		HV_FAST_MMU_DEMAP_CTX, %o5
273	ta		HV_FAST_TRAP
274	brnz,pn		%o0, __hypervisor_tlb_tl0_error
275	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
276	retl
277	 nop
278
279__hypervisor_flush_tlb_pending: /* 16 insns */
280	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
281	sllx		%o1, 3, %g1
282	mov		%o2, %g2
283	mov		%o0, %g3
2841:	sub		%g1, (1 << 3), %g1
285	ldx		[%g2 + %g1], %o0      /* ARG0: vaddr + IMMU-bit */
286	mov		%g3, %o1	      /* ARG1: mmu context */
287	mov		HV_MMU_ALL, %o2	      /* ARG2: flags */
288	srlx		%o0, PAGE_SHIFT, %o0
289	sllx		%o0, PAGE_SHIFT, %o0
290	ta		HV_MMU_UNMAP_ADDR_TRAP
291	brnz,pn		%o0, __hypervisor_tlb_tl0_error
292	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
293	brnz,pt		%g1, 1b
294	 nop
295	retl
296	 nop
297
298__hypervisor_flush_tlb_kernel_range: /* 16 insns */
299	/* %o0=start, %o1=end */
300	cmp		%o0, %o1
301	be,pn		%xcc, 2f
302	 sethi		%hi(PAGE_SIZE), %g3
303	mov		%o0, %g1
304	sub		%o1, %g1, %g2
305	sub		%g2, %g3, %g2
3061:	add		%g1, %g2, %o0	/* ARG0: virtual address */
307	mov		0, %o1		/* ARG1: mmu context */
308	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
309	ta		HV_MMU_UNMAP_ADDR_TRAP
310	brnz,pn		%o0, __hypervisor_tlb_tl0_error
311	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
312	brnz,pt		%g2, 1b
313	 sub		%g2, %g3, %g2
3142:	retl
315	 nop
316
317#ifdef DCACHE_ALIASING_POSSIBLE
318__hypervisor_flush_dcache_page:	/* 2 insns */
319	retl
320	 nop
321#endif
322
323tlb_patch_one:
3241:	lduw		[%o1], %g1
325	stw		%g1, [%o0]
326	flush		%o0
327	subcc		%o2, 1, %o2
328	add		%o1, 4, %o1
329	bne,pt		%icc, 1b
330	 add		%o0, 4, %o0
331	retl
332	 nop
333
334	.globl		cheetah_patch_cachetlbops
335cheetah_patch_cachetlbops:
336	save		%sp, -128, %sp
337
338	sethi		%hi(__flush_tlb_mm), %o0
339	or		%o0, %lo(__flush_tlb_mm), %o0
340	sethi		%hi(__cheetah_flush_tlb_mm), %o1
341	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
342	call		tlb_patch_one
343	 mov		19, %o2
344
345	sethi		%hi(__flush_tlb_pending), %o0
346	or		%o0, %lo(__flush_tlb_pending), %o0
347	sethi		%hi(__cheetah_flush_tlb_pending), %o1
348	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
349	call		tlb_patch_one
350	 mov		27, %o2
351
352#ifdef DCACHE_ALIASING_POSSIBLE
353	sethi		%hi(__flush_dcache_page), %o0
354	or		%o0, %lo(__flush_dcache_page), %o0
355	sethi		%hi(__cheetah_flush_dcache_page), %o1
356	or		%o1, %lo(__cheetah_flush_dcache_page), %o1
357	call		tlb_patch_one
358	 mov		11, %o2
359#endif /* DCACHE_ALIASING_POSSIBLE */
360
361	ret
362	 restore
363
364#ifdef CONFIG_SMP
365	/* These are all called by the slaves of a cross call, at
366	 * trap level 1, with interrupts fully disabled.
367	 *
368	 * Register usage:
369	 *   %g5	mm->context	(all tlb flushes)
370	 *   %g1	address arg 1	(tlb page and range flushes)
371	 *   %g7	address arg 2	(tlb range flush only)
372	 *
373	 *   %g6	scratch 1
374	 *   %g2	scratch 2
375	 *   %g3	scratch 3
376	 *   %g4	scratch 4
377	 */
378	.align		32
379	.globl		xcall_flush_tlb_mm
380xcall_flush_tlb_mm:	/* 21 insns */
381	mov		PRIMARY_CONTEXT, %g2
382	ldxa		[%g2] ASI_DMMU, %g3
383	srlx		%g3, CTX_PGSZ1_NUC_SHIFT, %g4
384	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
385	or		%g5, %g4, %g5	/* Preserve nucleus page size fields */
386	stxa		%g5, [%g2] ASI_DMMU
387	mov		0x40, %g4
388	stxa		%g0, [%g4] ASI_DMMU_DEMAP
389	stxa		%g0, [%g4] ASI_IMMU_DEMAP
390	stxa		%g3, [%g2] ASI_DMMU
391	retry
392	nop
393	nop
394	nop
395	nop
396	nop
397	nop
398	nop
399	nop
400	nop
401	nop
402
403	.globl		xcall_flush_tlb_pending
404xcall_flush_tlb_pending:	/* 21 insns */
405	/* %g5=context, %g1=nr, %g7=vaddrs[] */
406	sllx		%g1, 3, %g1
407	mov		PRIMARY_CONTEXT, %g4
408	ldxa		[%g4] ASI_DMMU, %g2
409	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4
410	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
411	or		%g5, %g4, %g5
412	mov		PRIMARY_CONTEXT, %g4
413	stxa		%g5, [%g4] ASI_DMMU
4141:	sub		%g1, (1 << 3), %g1
415	ldx		[%g7 + %g1], %g5
416	andcc		%g5, 0x1, %g0
417	be,pn		%icc, 2f
418
419	 andn		%g5, 0x1, %g5
420	stxa		%g0, [%g5] ASI_IMMU_DEMAP
4212:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
422	membar		#Sync
423	brnz,pt		%g1, 1b
424	 nop
425	stxa		%g2, [%g4] ASI_DMMU
426	retry
427	nop
428
429	.globl		xcall_flush_tlb_kernel_range
430xcall_flush_tlb_kernel_range:	/* 25 insns */
431	sethi		%hi(PAGE_SIZE - 1), %g2
432	or		%g2, %lo(PAGE_SIZE - 1), %g2
433	andn		%g1, %g2, %g1
434	andn		%g7, %g2, %g7
435	sub		%g7, %g1, %g3
436	add		%g2, 1, %g2
437	sub		%g3, %g2, %g3
438	or		%g1, 0x20, %g1		! Nucleus
4391:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
440	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
441	membar		#Sync
442	brnz,pt		%g3, 1b
443	 sub		%g3, %g2, %g3
444	retry
445	nop
446	nop
447	nop
448	nop
449	nop
450	nop
451	nop
452	nop
453	nop
454	nop
455	nop
456
457	/* This runs in a very controlled environment, so we do
458	 * not need to worry about BH races etc.
459	 */
460	.globl		xcall_sync_tick
461xcall_sync_tick:
462
463661:	rdpr		%pstate, %g2
464	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
465	.section	.sun4v_2insn_patch, "ax"
466	.word		661b
467	nop
468	nop
469	.previous
470
471	rdpr		%pil, %g2
472	wrpr		%g0, 15, %pil
473	sethi		%hi(109f), %g7
474	b,pt		%xcc, etrap_irq
475109:	 or		%g7, %lo(109b), %g7
476#ifdef CONFIG_TRACE_IRQFLAGS
477	call		trace_hardirqs_off
478	 nop
479#endif
480	call		smp_synchronize_tick_client
481	 nop
482	clr		%l6
483	b		rtrap_xcall
484	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
485
486	/* NOTE: This is SPECIAL!!  We do etrap/rtrap however
487	 *       we choose to deal with the "BH's run with
488	 *       %pil==15" problem (described in asm/pil.h)
489	 *       by just invoking rtrap directly past where
490	 *       BH's are checked for.
491	 *
492	 *       We do it like this because we do not want %pil==15
493	 *       lockups to prevent regs being reported.
494	 */
495	.globl		xcall_report_regs
496xcall_report_regs:
497
498661:	rdpr		%pstate, %g2
499	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
500	.section	.sun4v_2insn_patch, "ax"
501	.word		661b
502	nop
503	nop
504	.previous
505
506	rdpr		%pil, %g2
507	wrpr		%g0, 15, %pil
508	sethi		%hi(109f), %g7
509	b,pt		%xcc, etrap_irq
510109:	 or		%g7, %lo(109b), %g7
511#ifdef CONFIG_TRACE_IRQFLAGS
512	call		trace_hardirqs_off
513	 nop
514#endif
515	call		__show_regs
516	 add		%sp, PTREGS_OFF, %o0
517	clr		%l6
518	/* Has to be a non-v9 branch due to the large distance. */
519	b		rtrap_xcall
520	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
521
522#ifdef DCACHE_ALIASING_POSSIBLE
523	.align		32
524	.globl		xcall_flush_dcache_page_cheetah
525xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
526	sethi		%hi(PAGE_SIZE), %g3
5271:	subcc		%g3, (1 << 5), %g3
528	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
529	membar		#Sync
530	bne,pt		%icc, 1b
531	 nop
532	retry
533	nop
534#endif /* DCACHE_ALIASING_POSSIBLE */
535
536	.globl		xcall_flush_dcache_page_spitfire
537xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
538				     %g7 == kernel page virtual address
539				     %g5 == (page->mapping != NULL)  */
540#ifdef DCACHE_ALIASING_POSSIBLE
541	srlx		%g1, (13 - 2), %g1	! Form tag comparitor
542	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
543	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
5441:	ldxa		[%g3] ASI_DCACHE_TAG, %g2
545	andcc		%g2, 0x3, %g0
546	be,pn		%xcc, 2f
547	 andn		%g2, 0x3, %g2
548	cmp		%g2, %g1
549
550	bne,pt		%xcc, 2f
551	 nop
552	stxa		%g0, [%g3] ASI_DCACHE_TAG
553	membar		#Sync
5542:	cmp		%g3, 0
555	bne,pt		%xcc, 1b
556	 sub		%g3, (1 << 5), %g3
557
558	brz,pn		%g5, 2f
559#endif /* DCACHE_ALIASING_POSSIBLE */
560	 sethi		%hi(PAGE_SIZE), %g3
561
5621:	flush		%g7
563	subcc		%g3, (1 << 5), %g3
564	bne,pt		%icc, 1b
565	 add		%g7, (1 << 5), %g7
566
5672:	retry
568	nop
569	nop
570
571	/* %g5:	error
572	 * %g6:	tlb op
573	 */
574__hypervisor_tlb_xcall_error:
575	mov	%g5, %g4
576	mov	%g6, %g5
577	ba,pt	%xcc, etrap
578	 rd	%pc, %g7
579	mov	%l4, %o0
580	call	hypervisor_tlbop_error_xcall
581	 mov	%l5, %o1
582	ba,a,pt	%xcc, rtrap_clr_l6
583
584	.globl		__hypervisor_xcall_flush_tlb_mm
585__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
586	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
587	mov		%o0, %g2
588	mov		%o1, %g3
589	mov		%o2, %g4
590	mov		%o3, %g1
591	mov		%o5, %g7
592	clr		%o0		/* ARG0: CPU lists unimplemented */
593	clr		%o1		/* ARG1: CPU lists unimplemented */
594	mov		%g5, %o2	/* ARG2: mmu context */
595	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
596	mov		HV_FAST_MMU_DEMAP_CTX, %o5
597	ta		HV_FAST_TRAP
598	mov		HV_FAST_MMU_DEMAP_CTX, %g6
599	brnz,pn		%o0, __hypervisor_tlb_xcall_error
600	 mov		%o0, %g5
601	mov		%g2, %o0
602	mov		%g3, %o1
603	mov		%g4, %o2
604	mov		%g1, %o3
605	mov		%g7, %o5
606	membar		#Sync
607	retry
608
609	.globl		__hypervisor_xcall_flush_tlb_pending
610__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
611	/* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
612	sllx		%g1, 3, %g1
613	mov		%o0, %g2
614	mov		%o1, %g3
615	mov		%o2, %g4
6161:	sub		%g1, (1 << 3), %g1
617	ldx		[%g7 + %g1], %o0	/* ARG0: virtual address */
618	mov		%g5, %o1		/* ARG1: mmu context */
619	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
620	srlx		%o0, PAGE_SHIFT, %o0
621	sllx		%o0, PAGE_SHIFT, %o0
622	ta		HV_MMU_UNMAP_ADDR_TRAP
623	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
624	brnz,a,pn	%o0, __hypervisor_tlb_xcall_error
625	 mov		%o0, %g5
626	brnz,pt		%g1, 1b
627	 nop
628	mov		%g2, %o0
629	mov		%g3, %o1
630	mov		%g4, %o2
631	membar		#Sync
632	retry
633
634	.globl		__hypervisor_xcall_flush_tlb_kernel_range
635__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
636	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
637	sethi		%hi(PAGE_SIZE - 1), %g2
638	or		%g2, %lo(PAGE_SIZE - 1), %g2
639	andn		%g1, %g2, %g1
640	andn		%g7, %g2, %g7
641	sub		%g7, %g1, %g3
642	add		%g2, 1, %g2
643	sub		%g3, %g2, %g3
644	mov		%o0, %g2
645	mov		%o1, %g4
646	mov		%o2, %g7
6471:	add		%g1, %g3, %o0	/* ARG0: virtual address */
648	mov		0, %o1		/* ARG1: mmu context */
649	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
650	ta		HV_MMU_UNMAP_ADDR_TRAP
651	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
652	brnz,pn		%o0, __hypervisor_tlb_xcall_error
653	 mov		%o0, %g5
654	sethi		%hi(PAGE_SIZE), %o2
655	brnz,pt		%g3, 1b
656	 sub		%g3, %o2, %g3
657	mov		%g2, %o0
658	mov		%g4, %o1
659	mov		%g7, %o2
660	membar		#Sync
661	retry
662
663	/* These just get rescheduled to PIL vectors. */
664	.globl		xcall_call_function
665xcall_call_function:
666	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
667	retry
668
669	.globl		xcall_receive_signal
670xcall_receive_signal:
671	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
672	retry
673
674	.globl		xcall_capture
675xcall_capture:
676	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
677	retry
678
679	.globl		xcall_new_mmu_context_version
680xcall_new_mmu_context_version:
681	wr		%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
682	retry
683
684#endif /* CONFIG_SMP */
685
686
687	.globl		hypervisor_patch_cachetlbops
688hypervisor_patch_cachetlbops:
689	save		%sp, -128, %sp
690
691	sethi		%hi(__flush_tlb_mm), %o0
692	or		%o0, %lo(__flush_tlb_mm), %o0
693	sethi		%hi(__hypervisor_flush_tlb_mm), %o1
694	or		%o1, %lo(__hypervisor_flush_tlb_mm), %o1
695	call		tlb_patch_one
696	 mov		10, %o2
697
698	sethi		%hi(__flush_tlb_pending), %o0
699	or		%o0, %lo(__flush_tlb_pending), %o0
700	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
701	or		%o1, %lo(__hypervisor_flush_tlb_pending), %o1
702	call		tlb_patch_one
703	 mov		16, %o2
704
705	sethi		%hi(__flush_tlb_kernel_range), %o0
706	or		%o0, %lo(__flush_tlb_kernel_range), %o0
707	sethi		%hi(__hypervisor_flush_tlb_kernel_range), %o1
708	or		%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
709	call		tlb_patch_one
710	 mov		16, %o2
711
712#ifdef DCACHE_ALIASING_POSSIBLE
713	sethi		%hi(__flush_dcache_page), %o0
714	or		%o0, %lo(__flush_dcache_page), %o0
715	sethi		%hi(__hypervisor_flush_dcache_page), %o1
716	or		%o1, %lo(__hypervisor_flush_dcache_page), %o1
717	call		tlb_patch_one
718	 mov		2, %o2
719#endif /* DCACHE_ALIASING_POSSIBLE */
720
721#ifdef CONFIG_SMP
722	sethi		%hi(xcall_flush_tlb_mm), %o0
723	or		%o0, %lo(xcall_flush_tlb_mm), %o0
724	sethi		%hi(__hypervisor_xcall_flush_tlb_mm), %o1
725	or		%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
726	call		tlb_patch_one
727	 mov		21, %o2
728
729	sethi		%hi(xcall_flush_tlb_pending), %o0
730	or		%o0, %lo(xcall_flush_tlb_pending), %o0
731	sethi		%hi(__hypervisor_xcall_flush_tlb_pending), %o1
732	or		%o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
733	call		tlb_patch_one
734	 mov		21, %o2
735
736	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
737	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
738	sethi		%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
739	or		%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
740	call		tlb_patch_one
741	 mov		25, %o2
742#endif /* CONFIG_SMP */
743
744	ret
745	 restore
746