/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/guest-state-buffer.h>

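/*
 * Shadow of one guest BAT (Block Address Translation) register pair:
 * block effective page index and mask, block real page number, WIMG
 * storage attributes, page protection, and the supervisor/user-mode
 * valid bits.
 */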
struct kvmppc_bat {
	u64 raw;
	u32 bepi;
	u32 bepi_mask;
	u32 brpn;
	u8 wimg;
	u8 pp;
	bool vs		: 1;
	bool vp		: 1;
};

struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
	bool valid	: 1;
};

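/*
 * sid_map is a hash table with 2^SID_MAP_BITS entries, mapping guest
 * segment IDs to the host VSIDs that shadow them.
 */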
#define SID_MAP_BITS    9
#define SID_MAP_NUM     (1 << SID_MAP_BITS)
#define SID_MAP_MASK    (SID_MAP_NUM - 1)

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS	1
#else
#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
#endif

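/*
 * One shadow HPTE tracked by PR KVM.  Each entry is chained into
 * several hash lists so it can be found and invalidated by effective
 * address, by virtual page number, or in larger batches.
 */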
struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_node list_vpte_64k;
#endif
	struct rcu_head rcu_head;
	u64 host_vpn;
	u64 pfn;
	ulong slot;
	struct kvmppc_pte pte;
	int pagesize;
};

/*
 * Struct for a virtual core.
 * Note: entry_exit_map combines a bitmap of threads that have entered
 * in the bottom 8 bits and a bitmap of threads that have exited in the
 * next 8 bits.  This is so that we can atomically set the entry bit
 * iff the exit map is 0 without taking a lock.
 */
struct kvmppc_vcore {
	int n_runnable;
	int num_threads;
	int entry_exit_map;
	int napping_threads;
	int first_vcpuid;
	u16 pcpu;
	u16 last_cpu;
	u8 vcore_state;
	u8 in_guest;
	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
	struct list_head preempt_list;
	spinlock_t lock;
	struct rcuwait wait;
	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
	struct kvm *kvm;
	u64 tb_offset;		/* guest timebase - host timebase */
	u64 tb_offset_applied;	/* timebase offset currently in force */
	ulong lpcr;
	u32 arch_compat;
	ulong pcr;
	ulong dpdes;		/* doorbell state (POWER8) */
	ulong vtb;		/* virtual timebase */
	ulong conferring_threads;
	unsigned int halt_poll_ns;
	atomic_t online_count;
};

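/*
 * Book3S-specific per-vcpu MMU and SPR state used by PR KVM: shadow
 * segment and BAT state, emulated SPRs, and the shadow HPTE hash
 * tables.
 */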
struct kvmppc_vcpu_book3s {
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct {
		u64 esid;
		u64 vsid;
	} slb_shadow[64];
	u8 slb_shadow_max;
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
	u64 hid[6];
	u64 gqr[8];
	u64 sdr1;
	u64 hior;
	u64 msr_mask;
	u64 vtb;
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
	u32 vsid_next;
#else
	u64 proto_vsid_first;
	u64 proto_vsid_max;
	u64 proto_vsid_next;
#endif
	int context_id[SID_CONTEXTS];

	bool hior_explicit;		/* HIOR is set by ioctl, not PVR */

	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
	int hpte_cache_count;
	spinlock_t mmu_lock;
};

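/*
 * Synthetic flag bits ORed into VSIDs by the PR-KVM shadow MMU so that
 * the different guest address spaces (real-mode data/instruction
 * accesses, BAT mappings, 64k pages, 1T segments, problem state) stay
 * distinct in the shadow hash tables.
 */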
#define VSID_REAL	0x07ffffffffc00000ULL
#define VSID_BAT	0x07ffffffffb00000ULL
#define VSID_64K	0x0800000000000000ULL
#define VSID_1T		0x1000000000000000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
			unsigned long addr, unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
			unsigned long slb_v, unsigned long valid);
extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
			unsigned long gpa, gva_t ea, int is_store);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);

extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
			unsigned long ea, unsigned long dsisr);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
					gva_t eaddr, void *to, void *from,
					unsigned long n);
extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					void *to, unsigned long n);
extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
				      void *from, unsigned long n);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, u64 root,
				      u64 *pte_ret_p);
extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, u64 table,
			int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
				    unsigned int pshift, u64 lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
			unsigned int shift,
			const struct kvm_memory_slot *memslot,
			u64 lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
				    bool writing, unsigned long gpa,
				    u64 lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
				unsigned long gpa,
				struct kvm_memory_slot *memslot,
				bool writing, bool kvm_ro,
				pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
				      u64 lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			    unsigned long gfn);
extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			  unsigned long gfn);
extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			       unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
			const struct kvm_memory_slot *memslot);
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);

/* XXX remove this export when load_last_inst() is generic */
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
			bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
			unsigned long gfn, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
extern void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
			unsigned long gpa, bool dirty);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel,
			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			struct kvm_memory_slot *memslot,
			unsigned long *map);
extern unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm,
			unsigned long lpcr);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
			unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);

extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);

long kvmppc_read_intr(void);
void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
#else
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif

extern unsigned long nested_capabilities;
long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
void kvmhv_flush_lpid(u64 lpid);
void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
			     unsigned long type, unsigned long pg_sizes,
			     unsigned long start, unsigned long end);
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
			  u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr);
long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);

void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

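/*
 * Two nested virtualization APIs exist: nestedv1, where the L1 passes
 * complete L2 register state to the L0 on each H_ENTER_NESTED call,
 * and nestedv2, where state is exchanged through the PAPR guest state
 * buffer.  A static key records which one is in use.
 */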
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

extern struct static_key_false __kvmhv_is_nestedv2;

static inline bool kvmhv_is_nestedv2(void)
{
	return static_branch_unlikely(&__kvmhv_is_nestedv2);
}

static inline bool kvmhv_is_nestedv1(void)
{
	return !static_branch_likely(&__kvmhv_is_nestedv2);
}

#else

static inline bool kvmhv_is_nestedv2(void)
{
	return false;
}

static inline bool kvmhv_is_nestedv1(void)
{
	return false;
}

#endif

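/*
 * The wrappers below compile to no-ops unless the nestedv2 API is in
 * use, in which case register reads and writes must be kept coherent
 * with the guest state buffer shared with the L0 hypervisor.
 */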
int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden);
int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden);

static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
					       struct pt_regs *regs)
{
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_reload_ptregs(vcpu, regs);
	return 0;
}

static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
						   struct pt_regs *regs)
{
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_mark_dirty_ptregs(vcpu, regs);
	return 0;
}

static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
{
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_mark_dirty(vcpu, iden);
	return 0;
}

static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
{
	if (kvmhv_is_nestedv2())
		return __kvmhv_nestedv2_cached_reload(vcpu, iden);
	return 0;
}

extern int kvm_irq_bypass;

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.book3s;
}

/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

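/*
 * Register accessors.  Setters mark the register dirty and getters
 * reload it from the guest state buffer first, so that a nestedv2 L0
 * hypervisor always sees and provides current values.
 */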
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.regs.gpr[num] = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(num));
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_GPR(num)) < 0);
	return vcpu->arch.regs.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.regs.ccr = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CR) < 0);
	return vcpu->arch.regs.ccr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.xer = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
}

static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_XER) < 0);
	return vcpu->arch.regs.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.ctr = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CTR) < 0);
	return vcpu->arch.regs.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.link = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_LR) < 0);
	return vcpu->arch.regs.link;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.nip = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_NIA) < 0);
	return vcpu->arch.regs.nip;
}

static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
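/* Does the guest run with an endianness other than the host kernel's? */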
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}

static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
	return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
}

static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
{
	vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
}

static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_FPSCR) < 0);
	return vcpu->arch.fp.fpscr;
}

static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val)
{
	vcpu->arch.fp.fpscr = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_FPSCR);
}

static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
	return vcpu->arch.fp.fpr[i][j];
}

static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j,
				      u64 val)
{
	vcpu->arch.fp.fpr[i][j] = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
}

#ifdef CONFIG_ALTIVEC
static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(32 + i)) < 0);
	*v = vcpu->arch.vr.vr[i];
}

static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i,
				     vector128 *val)
{
	vcpu->arch.vr.vr[i] = *val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(32 + i));
}

static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSCR) < 0);
	return vcpu->arch.vr.vscr.u[3];
}

static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.vr.vscr.u[3] = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSCR);
}
#endif

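/*
 * Generate kvmppc_get_<reg>()/kvmppc_set_<reg>() accessors for a field
 * of vcpu->arch.  Every setter marks the corresponding guest state ID
 * dirty and every getter reloads it first, keeping the nestedv2 guest
 * state buffer coherent.  For example,
 * KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR) expands to
 * kvmppc_set_tar() and kvmppc_get_tar().
 */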
#define KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden)		\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	vcpu->arch.reg = val;						\
	kvmhv_nestedv2_mark_dirty(vcpu, iden);				\
}

#define KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden)		\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0);		\
	return vcpu->arch.reg;						\
}

#define KVMPPC_BOOK3S_VCPU_ACCESSOR(reg, size, iden)			\
	KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden)		\
	KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden)		\

KVMPPC_BOOK3S_VCPU_ACCESSOR(pid, 32, KVMPPC_GSID_PIDR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbhr, 64, KVMPPC_GSID_EBBHR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbrr, 64, KVMPPC_GSID_EBBRR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(bescr, 64, KVMPPC_GSID_BESCR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ic, 64, KVMPPC_GSID_IC)
KVMPPC_BOOK3S_VCPU_ACCESSOR(vrsave, 64, KVMPPC_GSID_VRSAVE)

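/*
 * Same pattern as above, but for state that lives in the shared
 * vcpu->arch.vcore rather than in the vcpu itself.
 */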
#define KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden)		\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	vcpu->arch.vcore->reg = val;					\
	kvmhv_nestedv2_mark_dirty(vcpu, iden);				\
}

#define KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden)		\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0);		\
	return vcpu->arch.vcore->reg;					\
}

#define KVMPPC_BOOK3S_VCORE_ACCESSOR(reg, size, iden)			\
	KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden)		\
	KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden)		\

KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)
KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)

static inline u64 kvmppc_get_tb_offset(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vcore->tb_offset;
}

static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0);
	return vcpu->arch.dec_expires;
}

static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val)
{
	vcpu->arch.dec_expires = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB);
}

/* Expiry time of vcpu DEC relative to host TB */
static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
{
	return kvmppc_get_dec_expires(vcpu) - kvmppc_get_tb_offset(vcpu);
}

static inline bool is_kvmppc_resume_guest(int r)
{
	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
	/* Only PR KVM supports the magic page */
	return !is_kvmppc_hv_enabled(vcpu->kvm);
}

extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);

/*
 * Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls
 */
#define OSI_SC_MAGIC_R3			0x113724FA
#define OSI_SC_MAGIC_R4			0x77810F9B

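/* Opcode image of the dcbz instruction, matched when emulating guest dcbz */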
#define INS_DCBZ			0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW				0x7fe00008

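/*
 * Offset by which PR KVM relocates guest addresses when the guest runs
 * with instruction and data relocation in different states ("split
 * real mode").
 */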
#define SPLIT_HACK_MASK			0xff000000
#define SPLIT_HACK_OFFS			0xfb000000

/*
 * This packs a VCPU ID from the [0..KVM_MAX_VCPU_IDS) space down to the
 * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
 * (but not its actual threading mode, which is not available) to avoid
 * collisions.
 *
 * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
 * 0) unchanged: if the guest is filling each VCORE completely then it will be
 * using consecutive IDs and it will fill the space without any packing.
 *
 * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
 * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
 * added to avoid collisions.
 *
 * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
 * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
 * can be safely packed into the second half of each VCORE by adding an offset
 * of (stride / 2).
 *
 * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
 * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
 * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
 *
 * Finally, VCPU IDs from blocks 4..7 will only be seen if the guest is using a
 * stride of 8 and 1 thread per core, so the remaining offsets of 1, 5, 3 and 7
 * must be free to use.
 *
 * (The offsets for each block are stored in block_offsets[], indexed by the
 * block number if the stride is 8. For cases where the guest's stride is less
 * than 8, we can re-use the block_offsets array by multiplying the block
 * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
 */
static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
{
	const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
	int stride = kvm->arch.emul_smt_mode;
	int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
	u32 packed_id;

	if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
		return 0;
	packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
	if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
		return 0;
	return packed_id;
}

#endif /* __ASM_KVM_BOOK3S_H__ */