// SPDX-License-Identifier: GPL-2.0
/*
 * Secure pages management: Migration of pages between normal and secure
 * memory of KVM guests.
 *
 * Copyright 2018 Bharata B Rao, IBM Corp. <bharata@linux.ibm.com>
 */

/*
 * A pseries guest can be run as a secure guest on Ultravisor-enabled
 * POWER platforms. On such platforms, this driver manages the movement
 * of guest pages between the normal memory managed by the hypervisor
 * (HV) and the secure memory managed by the Ultravisor (UV).
 *
 * Page-in or page-out requests from UV come to HV as hcalls, and HV
 * calls back into UV via ultracalls to satisfy these page requests.
 *
 * Private ZONE_DEVICE memory equal to the amount of secure memory
 * available in the platform for running secure guests is hotplugged.
 * Whenever a page belonging to the guest becomes secure, a page from
 * this private device memory is used to represent and track that secure
 * page on the HV side. Some pages (such as virtio buffers and VPA pages)
 * are shared between UV and HV. However, such pages aren't represented
 * by device private memory, and mappings to the shared memory exist in
 * both UV and HV page tables.
 */

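/*
 * Illustrative sketch (not part of the driver logic): the core of a
 * page-in request is a migrate_vma migration of the guest page to a
 * device PFN, paired with a UV_PAGE_IN ultracall, mirroring what
 * kvmppc_svm_page_in() below does:
 *
 *	pfn = *mig.src >> MIGRATE_PFN_SHIFT;
 *	ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
 *			 gpa, 0, page_shift);
 *
 * The reverse direction (HV reclaiming a secure page) pairs a
 * migrate_vma migration out of device memory with uv_page_out(),
 * as in __kvmppc_svm_page_out() below.
 */
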
/*
 * Notes on locking
 *
 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
 * page-in and page-out requests for the same GPA. Concurrent accesses
 * can either come via UV (guest vCPUs requesting the same page)
 * or when HV and guest simultaneously access the same page.
 * This mutex serializes the migration of a page from HV (normal) to
 * UV (secure) and vice versa. So the serialization points are around
 * the migrate_vma routines and the page-in/out routines.
 *
 * A per-guest mutex comes with a cost though. Mainly, it serializes the
 * fault path, as a page-out can occur when HV faults on accessing a
 * secure guest page. Currently UV issues page-in requests for all the
 * guest PFNs one at a time during early boot (UV_ESM uvcall), so this
 * is not a cause for concern. Also, the number of page-outs caused by
 * HV touching secure pages is currently very low. If and when UV
 * supports overcommitting, we might see concurrent guest-driven
 * page-outs.
 *
 * Locking order (see the sketch below)
 *
 * 1. kvm->srcu - Protects KVM memslots
 * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots, thus acting
 *			     as sync-points for page-in/out
 */

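/*
 * Illustrative sketch of the locking order above, as followed by the
 * hcall handlers in this file (cf. kvmppc_h_svm_page_out()); this is
 * an example, not a helper used by the driver:
 *
 *	srcu_idx = srcu_read_lock(&kvm->srcu);		// 1. memslots
 *	mmap_read_lock(kvm->mm);			// 2. find_vma etc.
 *	mutex_lock(&kvm->arch.uvmem_lock);		// 3. page-in/out sync
 *	...
 *	mutex_unlock(&kvm->arch.uvmem_lock);
 *	mmap_read_unlock(kvm->mm);
 *	srcu_read_unlock(&kvm->srcu, srcu_idx);
 */
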
/*
 * Notes on page size
 *
 * Currently UV uses 2MB mappings internally, but will issue H_SVM_PAGE_IN
 * and H_SVM_PAGE_OUT hcalls at PAGE_SIZE (64K) granularity. HV tracks
 * secure GPAs at 64K page size and maintains one device PFN for each
 * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are also issued
 * for one 64K page at a time.
 *
 * HV faulting on secure pages: When HV touches any secure page, it
 * faults and issues a UV_PAGE_OUT request with 64K page size. Currently
 * UV splits and remaps the 2MB page if necessary and copies out the
 * required 64K page contents.
 *
 * Shared pages: Whenever the guest shares a secure page, UV will split
 * and remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K
 * page size.
 *
 * HV invalidating a page: When a regular page belonging to a secure
 * guest gets unmapped, HV informs UV with a UV_PAGE_INVAL of 64K
 * page size. Using 64K page size is correct here because any non-secure
 * page will essentially be of 64K page size. Splitting by UV during sharing
 * and page-out ensures this.
 *
 * Page fault handling: When HV handles a page fault of a page belonging
 * to a secure guest, it sends that to UV with a 64K UV_PAGE_IN request.
 * Using 64K size is correct here too, as UV would have split the 2MB page
 * into 64K mappings and would have done page-outs earlier.
 *
 * In summary, the current secure pages handling code in HV assumes
 * 64K page size and in fact fails any page-in/page-out requests of
 * non-64K size upfront. If and when UV starts supporting multiple
 * page sizes, this assumption will need to be revisited.
 */

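/*
 * Illustrative sketch of the upfront 64K-only check mentioned above,
 * exactly as performed by kvmppc_h_svm_page_in() and
 * kvmppc_h_svm_page_out() below:
 *
 *	if (page_shift != PAGE_SHIFT)	// only 64K pages are supported
 *		return H_P3;
 */
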
#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/kvm_host.h>
#include <linux/ksm.h>
#include <linux/of.h>
#include <linux/memremap.h>
#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s_uvmem.h>

static struct dev_pagemap kvmppc_uvmem_pgmap;
static unsigned long *kvmppc_uvmem_bitmap;
static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);

/*
 * States of a GFN
 * ---------------
 * The GFN can be in one of the following states.
 *
 * (a) Secure - The GFN is secure. The GFN is associated with
 *	a Secure VM; the contents of the GFN are not accessible
 *	to the Hypervisor. This GFN can be backed by a secure-PFN,
 *	or can be backed by a normal-PFN with contents encrypted.
 *	The former is true when the GFN is paged-in to the
 *	ultravisor. The latter is true when the GFN is paged-out
 *	of the ultravisor.
 *
 * (b) Shared - The GFN is shared. The GFN is associated with a
 *	secure VM. The contents of the GFN are accessible to the
 *	Hypervisor. This GFN is backed by a normal-PFN and its
 *	contents are un-encrypted.
 *
 * (c) Normal - The GFN is normal. The GFN is associated with
 *	a normal VM. The contents of the GFN are accessible to
 *	the Hypervisor. Its contents are never encrypted.
 *
 * States of a VM
 * ---------------
 *
 * Normal VM: A VM whose contents are always accessible to
 *	the hypervisor. All its GFNs are normal-GFNs.
 *
 * Secure VM: A VM whose contents are not accessible to the
 *	hypervisor without the VM's consent. Its GFNs are
 *	either Shared-GFNs or Secure-GFNs.
 *
 * Transient VM: A normal VM that is transitioning to a secure VM.
 *	The transition starts on successful return of
 *	H_SVM_INIT_START, and ends on successful return
 *	of H_SVM_INIT_DONE. This transient VM can have GFNs
 *	in any of the three states, i.e. Secure-GFN, Shared-GFN,
 *	and Normal-GFN. The VM never executes in this state
 *	in supervisor mode.
 *
 * Memory slot state
 * -----------------
 *	The state of a memory slot mirrors the state of the
 *	VM the memory slot is associated with.
 *
 * VM state transitions
 * --------------------
 *
 *  A VM always starts in Normal mode.
 *
 *  H_SVM_INIT_START moves the VM into the transient state. During this
 *  time the Ultravisor may request some of its GFNs to be shared or
 *  secured. So its GFNs can be in any of the three GFN states.
 *
 *  H_SVM_INIT_DONE moves the VM entirely from the transient state to
 *  the secure state. At this point any left-over normal-GFNs are
 *  transitioned to Secure-GFNs.
 *
 *  H_SVM_INIT_ABORT moves the transient VM back to a normal VM.
 *  All its GFNs are moved to Normal-GFNs.
 *
 *  UV_TERMINATE transitions the secure VM back to a normal VM. All
 *  the secure-GFNs and shared-GFNs are transitioned to normal-GFNs.
 *  Note: The contents of the normal-GFNs are undefined at this point.
 *
 * GFN state implementation
 * -------------------------
 *
 * A Secure GFN is associated with a secure-PFN, also called a uvmem_pfn,
 * when the GFN is paged-in. Its pfns[] entry has the KVMPPC_GFN_UVMEM_PFN
 * flag set, and contains the value of the secure-PFN.
 * It is associated with a normal-PFN, also called a mem_pfn, when
 * the GFN is paged-out. Its pfns[] entry has the KVMPPC_GFN_MEM_PFN flag
 * set. The value of the normal-PFN is not tracked.
 *
 * A Shared GFN is associated with a normal-PFN. Its pfns[] entry has the
 * KVMPPC_GFN_SHARED flag set. The value of the normal-PFN is not tracked.
 *
 * A Normal GFN is associated with a normal-PFN. Its pfns[] entry has
 * no flag set. The value of the normal-PFN is not tracked.
 *
 * Life cycle of a GFN
 * --------------------
 *
 * --------------------------------------------------------------
 * |        |     Share  |  Unshare | SVM       |H_SVM_INIT_DONE|
 * |        |operation   |operation | abort/    |               |
 * |        |            |          | terminate |               |
 * --------------------------------------------------------------
 * |        |            |          |           |               |
 * | Secure |     Shared | Secure   |Normal     |Secure         |
 * |        |            |          |           |               |
 * | Shared |     Shared | Secure   |Normal     |Shared         |
 * |        |            |          |           |               |
 * | Normal |     Shared | Secure   |Normal     |Secure         |
 * --------------------------------------------------------------
 *
 * Life cycle of a VM
 * --------------------
 *
 * --------------------------------------------------------------------
 * |         |  start    |  H_SVM_  |H_SVM_   |H_SVM_     |UV_SVM_    |
 * |         |  VM       |INIT_START|INIT_DONE|INIT_ABORT |TERMINATE  |
 * |         |           |          |         |           |           |
 * --------------------------------------------------------------------
 * |         |           |          |         |           |           |
 * | Normal  | Normal    | Transient|Error    |Error      |Normal     |
 * |         |           |          |         |           |           |
 * | Secure  |   Error   | Error    |Error    |Error      |Normal     |
 * |         |           |          |         |           |           |
 * |Transient|   N/A     | Error    |Secure   |Normal     |Normal     |
 * --------------------------------------------------------------------
 */

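/*
 * Illustrative sketch (example only) of one row of the GFN life cycle
 * table: a share operation moves any GFN to the Shared state. In this
 * driver that transition is recorded by kvmppc_share_page(), roughly:
 *
 *	if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
 *			page_shift)) {
 *		kvmppc_gfn_shared(gfn, kvm);	// GFN is now Shared
 *		ret = H_SUCCESS;
 *	}
 */
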
#define KVMPPC_GFN_UVMEM_PFN	(1UL << 63)
#define KVMPPC_GFN_MEM_PFN	(1UL << 62)
#define KVMPPC_GFN_SHARED	(1UL << 61)
#define KVMPPC_GFN_SECURE	(KVMPPC_GFN_UVMEM_PFN | KVMPPC_GFN_MEM_PFN)
#define KVMPPC_GFN_FLAG_MASK	(KVMPPC_GFN_SECURE | KVMPPC_GFN_SHARED)
#define KVMPPC_GFN_PFN_MASK	(~KVMPPC_GFN_FLAG_MASK)

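/*
 * Illustrative sketch of how a pfns[] entry encodes state with the
 * flags above (the "entry" name exists only for this example): a
 * secure GFN backed by a device PFN stores the PFN in the low bits, so
 *
 *	entry = uvmem_pfn | KVMPPC_GFN_UVMEM_PFN;
 *	uvmem_pfn = entry & KVMPPC_GFN_PFN_MASK;	// recover the PFN
 *	state = entry & KVMPPC_GFN_FLAG_MASK;		// recover the state
 *
 * This matches how kvmppc_mark_gfn() and kvmppc_gfn_is_uvmem_pfn()
 * below write and read the entries.
 */
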
struct kvmppc_uvmem_slot {
	struct list_head list;
	unsigned long nr_pfns;
	unsigned long base_pfn;
	unsigned long *pfns;
};

struct kvmppc_uvmem_page_pvt {
	struct kvm *kvm;
	unsigned long gpa;
	bool skip_page_out;
	bool remove_gfn;
};

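/*
 * Illustrative note: each device page carries its kvmppc_uvmem_page_pvt
 * in page->zone_device_data, so a fault handler can recover the guest
 * context from the page alone, e.g. (cf. kvmppc_uvmem_migrate_to_ram()):
 *
 *	struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;
 */
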
bool kvmppc_uvmem_available(void)
{
	/*
	 * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor
	 * and our data structures have been initialized successfully.
	 */
	return !!kvmppc_uvmem_bitmap;
}

int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->pfns = vcalloc(slot->npages, sizeof(*p->pfns));
	if (!p->pfns) {
		kfree(p);
		return -ENOMEM;
	}
	p->nr_pfns = slot->npages;
	p->base_pfn = slot->base_gfn;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_add(&p->list, &kvm->arch.uvmem_pfns);
	mutex_unlock(&kvm->arch.uvmem_lock);

	return 0;
}

/*
 * All device PFNs are already released by the time we come here.
 */
void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p, *next;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
		if (p->base_pfn == slot->base_gfn) {
			vfree(p->pfns);
			list_del(&p->list);
			kfree(p);
			break;
		}
	}
	mutex_unlock(&kvm->arch.uvmem_lock);
}

/*
 * Mark the GFN's pfns[] entry with the given state flag.
 * Called with kvm->arch.uvmem_lock held.
 */
static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm,
			unsigned long flag, unsigned long uvmem_pfn)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			if (flag == KVMPPC_GFN_UVMEM_PFN)
				p->pfns[index] = uvmem_pfn | flag;
			else
				p->pfns[index] = flag;
			return;
		}
	}
}

/* mark the GFN as a secure-GFN associated with @uvmem_pfn, a device PFN. */
static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn,
			unsigned long uvmem_pfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn);
}

/* mark the GFN as a secure-GFN associated with a memory-PFN. */
static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0);
}

/* mark the GFN as a shared GFN. */
static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0);
}

/* mark the GFN as a non-existent GFN. */
static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, 0, 0);
}

/* return true if the GFN is a secure-GFN backed by a secure-PFN */
static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
				    unsigned long *uvmem_pfn)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) {
				if (uvmem_pfn)
					*uvmem_pfn = p->pfns[index] &
						     KVMPPC_GFN_PFN_MASK;
				return true;
			} else {
				return false;
			}
		}
	}
	return false;
}

/*
 * Starting from *gfn, search for the next available GFN that has not yet
 * transitioned to a secure GFN. Return the value of that GFN in *gfn. If
 * such a GFN is found, return true, else return false.
 *
 * Must be called with kvm->arch.uvmem_lock held.
 */
static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
		struct kvm *kvm, unsigned long *gfn)
{
	struct kvmppc_uvmem_slot *p = NULL, *iter;
	bool ret = false;
	unsigned long i;

	list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
		if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
			p = iter;
			break;
		}
	if (!p)
		return ret;
	/*
	 * The code below assumes a one-to-one correspondence between
	 * kvmppc_uvmem_slot and memslot.
	 */
	for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) {
		unsigned long index = i - p->base_pfn;

		if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) {
			*gfn = i;
			ret = true;
			break;
		}
	}
	return ret;
}

static int kvmppc_memslot_page_merge(struct kvm *kvm,
		const struct kvm_memory_slot *memslot, bool merge)
{
	unsigned long gfn = memslot->base_gfn;
	unsigned long end, start = gfn_to_hva(kvm, gfn);
	unsigned long vm_flags;
	int ret = 0;
	struct vm_area_struct *vma;
	int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;

	if (kvm_is_error_hva(start))
		return H_STATE;

	end = start + (memslot->npages << PAGE_SHIFT);

	mmap_write_lock(kvm->mm);
	do {
		vma = find_vma_intersection(kvm->mm, start, end);
		if (!vma) {
			ret = H_STATE;
			break;
		}
		vma_start_write(vma);
		/* Copy vm_flags to avoid partial modifications in ksm_madvise */
		vm_flags = vma->vm_flags;
		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
			  merge_flag, &vm_flags);
		if (ret) {
			ret = H_STATE;
			break;
		}
		vm_flags_reset(vma, vm_flags);
		start = vma->vm_end;
	} while (end > vma->vm_end);

	mmap_write_unlock(kvm->mm);
	return ret;
}

static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
	kvmppc_uvmem_slot_free(kvm, memslot);
	kvmppc_memslot_page_merge(kvm, memslot, true);
}

static int __kvmppc_uvmem_memslot_create(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	int ret = H_PARAMETER;

	if (kvmppc_memslot_page_merge(kvm, memslot, false))
		return ret;

	if (kvmppc_uvmem_slot_init(kvm, memslot))
		goto out1;

	ret = uv_register_mem_slot(kvm->arch.lpid,
				   memslot->base_gfn << PAGE_SHIFT,
				   memslot->npages * PAGE_SIZE,
				   0, memslot->id);
	if (ret < 0) {
		ret = H_PARAMETER;
		goto out;
	}
	return 0;
out:
	kvmppc_uvmem_slot_free(kvm, memslot);
out1:
	kvmppc_memslot_page_merge(kvm, memslot, true);
	return ret;
}

unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot, *m;
	int ret = H_SUCCESS;
	int srcu_idx, bkt;

	kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;

	if (!kvmppc_uvmem_bitmap)
		return H_UNSUPPORTED;

	/* Only radix guests can be secure guests */
	if (!kvm_is_radix(kvm))
		return H_UNSUPPORTED;

	/* NAK the transition to secure if not enabled */
	if (!kvm->arch.svm_enabled)
		return H_AUTHORITY;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	/* register the memslot */
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, bkt, slots) {
		ret = __kvmppc_uvmem_memslot_create(kvm, memslot);
		if (ret)
			break;
	}

	if (ret) {
		/* undo the partial registration done above */
		slots = kvm_memslots(kvm);
		kvm_for_each_memslot(m, bkt, slots) {
			if (m == memslot)
				break;
			__kvmppc_uvmem_memslot_delete(kvm, m);
		}
	}

	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * Provision a new page on the HV side and copy over the contents
 * from secure memory using the UV_PAGE_OUT uvcall.
 * Caller must hold kvm->arch.uvmem_lock.
 */
static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end, unsigned long page_shift,
		struct kvm *kvm, unsigned long gpa, struct page *fault_page)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig = { 0 };
	struct page *dpage, *spage;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	int ret = U_SUCCESS;

	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;
	mig.pgmap_owner = &kvmppc_uvmem_pgmap;
	mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
	mig.fault_page = fault_page;

	/* The requested page is already paged-out, nothing to do */
	if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
		return ret;

	ret = migrate_vma_setup(&mig);
	if (ret)
		return -1;

	spage = migrate_pfn_to_page(*mig.src);
	if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
		goto out_finalize;

	if (!is_zone_device_page(spage))
		goto out_finalize;

	dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	lock_page(dpage);
	pvt = spage->zone_device_data;
	pfn = page_to_pfn(dpage);

	/*
	 * This function is used in two cases:
	 * - When HV touches a secure page, for which we do UV_PAGE_OUT
	 * - When a secure page is converted to a shared page, we *get*
	 *   the page to essentially unmap the device page. In this
	 *   case we skip the page-out.
	 */
	if (!pvt->skip_page_out)
		ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
				  gpa, 0, page_shift);

	if (ret == U_SUCCESS) {
		*mig.dst = migrate_pfn(pfn);
	} else {
		unlock_page(dpage);
		__free_page(dpage);
		goto out_finalize;
	}

	migrate_vma_pages(&mig);

out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}

static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
				      unsigned long start, unsigned long end,
				      unsigned long page_shift,
				      struct kvm *kvm, unsigned long gpa,
				      struct page *fault_page)
{
	int ret;

	mutex_lock(&kvm->arch.uvmem_lock);
	ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa,
				fault_page);
	mutex_unlock(&kvm->arch.uvmem_lock);

	return ret;
}

/*
 * Drop device pages that we maintain for the secure guest
 *
 * We first mark the pages to be skipped from UV_PAGE_OUT when there
 * is an HV-side fault on these pages. Next we *get* these pages, forcing
 * a fault on them, and do fault-time migration to replace the device PTEs
 * in QEMU's page tables with normal PTEs from newly allocated pages.
 */
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
			     struct kvm *kvm, bool skip_page_out)
{
	int i;
	struct kvmppc_uvmem_page_pvt *pvt;
	struct page *uvmem_page;
	struct vm_area_struct *vma = NULL;
	unsigned long uvmem_pfn, gfn;
	unsigned long addr;

	mmap_read_lock(kvm->mm);

	addr = slot->userspace_addr;

	gfn = slot->base_gfn;
	for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) {

		/* Fetch the VMA if addr is not in the latest fetched one */
		if (!vma || addr >= vma->vm_end) {
			vma = vma_lookup(kvm->mm, addr);
			if (!vma) {
				pr_err("Can't find VMA for gfn:0x%lx\n", gfn);
				break;
			}
		}

		mutex_lock(&kvm->arch.uvmem_lock);

		if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
			uvmem_page = pfn_to_page(uvmem_pfn);
			pvt = uvmem_page->zone_device_data;
			pvt->skip_page_out = skip_page_out;
			pvt->remove_gfn = true;

			if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE,
						  PAGE_SHIFT, kvm, pvt->gpa, NULL))
				pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",
				       pvt->gpa, addr);
		} else {
			/* Remove the shared flag if any */
			kvmppc_gfn_remove(gfn, kvm);
		}

		mutex_unlock(&kvm->arch.uvmem_lock);
	}

	mmap_read_unlock(kvm->mm);
}

unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
{
	int srcu_idx, bkt;
	struct kvm_memory_slot *memslot;

	/*
	 * Expect to be called only after INIT_START and before INIT_DONE.
	 * If INIT_DONE was completed, use normal VM termination sequence.
	 */
	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return H_STATE;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
		kvmppc_uvmem_drop_pages(memslot, kvm, false);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	kvm->arch.secure_guest = 0;
	uv_svm_terminate(kvm->arch.lpid);

	return H_PARAMETER;
}

/*
 * Get a free device PFN from the pool
 *
 * Called when a normal page is moved to secure memory (UV_PAGE_IN). Device
 * PFN will be used to keep track of the secure page on HV side.
 *
 * Called with kvm->arch.uvmem_lock held
 */
static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
{
	struct page *dpage = NULL;
	unsigned long bit, uvmem_pfn;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn_last, pfn_first;

	pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
	pfn_last = pfn_first +
		   (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);

	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
				  pfn_last - pfn_first);
	if (bit >= (pfn_last - pfn_first))
		goto out;
	bitmap_set(kvmppc_uvmem_bitmap, bit, 1);
	spin_unlock(&kvmppc_uvmem_bitmap_lock);

	pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
	if (!pvt)
		goto out_clear;

	uvmem_pfn = bit + pfn_first;
	kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);

	pvt->gpa = gpa;
	pvt->kvm = kvm;

	dpage = pfn_to_page(uvmem_pfn);
	dpage->zone_device_data = pvt;
	zone_device_page_init(dpage);
	return dpage;
out_clear:
	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bitmap_clear(kvmppc_uvmem_bitmap, bit, 1);
out:
	spin_unlock(&kvmppc_uvmem_bitmap_lock);
	return NULL;
}

/*
 * Alloc a PFN from the private device memory pool. If @pagein is true,
 * copy the page from normal memory to secure memory using the UV_PAGE_IN
 * uvcall.
 */
static int kvmppc_svm_page_in(struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end, unsigned long gpa, struct kvm *kvm,
		unsigned long page_shift,
		bool pagein)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig = { 0 };
	struct page *spage;
	unsigned long pfn;
	struct page *dpage;
	int ret = 0;

	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;
	mig.flags = MIGRATE_VMA_SELECT_SYSTEM;

	ret = migrate_vma_setup(&mig);
	if (ret)
		return ret;

	if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
		ret = -1;
		goto out_finalize;
	}

	dpage = kvmppc_uvmem_get_page(gpa, kvm);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	if (pagein) {
		pfn = *mig.src >> MIGRATE_PFN_SHIFT;
		spage = migrate_pfn_to_page(*mig.src);
		if (spage) {
			ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
					gpa, 0, page_shift);
			if (ret)
				goto out_finalize;
		}
	}

	*mig.dst = migrate_pfn(page_to_pfn(dpage));
	migrate_vma_pages(&mig);
out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}

static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	unsigned long gfn = memslot->base_gfn;
	struct vm_area_struct *vma;
	unsigned long start, end;
	int ret = 0;

	mmap_read_lock(kvm->mm);
	mutex_lock(&kvm->arch.uvmem_lock);
	while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) {
		ret = H_STATE;
		start = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(start))
			break;

		end = start + (1UL << PAGE_SHIFT);
		vma = find_vma_intersection(kvm->mm, start, end);
		if (!vma || vma->vm_start > start || vma->vm_end < end)
			break;

		ret = kvmppc_svm_page_in(vma, start, end,
				(gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
		if (ret) {
			ret = H_STATE;
			break;
		}

		/* relinquish the cpu if needed */
		cond_resched();
	}
	mutex_unlock(&kvm->arch.uvmem_lock);
	mmap_read_unlock(kvm->mm);
	return ret;
}

unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx, bkt;
	long ret = H_SUCCESS;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	/* migrate any unmoved normal pfn to device pfns */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, bkt, slots) {
		ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
		if (ret) {
			/*
			 * The pages will remain transitioned.
			 * It is the caller's responsibility to
			 * terminate the VM, which will undo
			 * all state of the VM. Till then
			 * this VM is in an erroneous state.
			 * Its KVMPPC_SECURE_INIT_DONE will
			 * remain unset.
			 */
			ret = H_STATE;
			goto out;
		}
	}

	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
	pr_info("LPID %lld went secure\n", kvm->arch.lpid);

out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * Shares the page with HV, thus making it a normal page.
 *
 * - If the page is already secure, then provision a new page and share
 * - If the page is a normal page, share the existing page
 *
 * In the former case, uses the dev_pagemap_ops.migrate_to_ram handler
 * to unmap the device page from QEMU's page tables.
 */
static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
		unsigned long page_shift)
{
	int ret = H_PARAMETER;
	struct page *uvmem_page;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	unsigned long gfn = gpa >> page_shift;
	int srcu_idx;
	unsigned long uvmem_pfn;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
		/*
		 * do not drop the GFN. It is a valid GFN
		 * that is transitioned to a shared GFN.
		 */
		pvt->remove_gfn = false;
	}

retry:
	mutex_unlock(&kvm->arch.uvmem_lock);
	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		goto out;

	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
		pvt->remove_gfn = false; /* it continues to be a valid GFN */
		kvm_release_pfn_clean(pfn);
		goto retry;
	}

	if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
				page_shift)) {
		kvmppc_gfn_shared(gfn, kvm);
		ret = H_SUCCESS;
	}
	kvm_release_pfn_clean(pfn);
	mutex_unlock(&kvm->arch.uvmem_lock);
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * H_SVM_PAGE_IN: Move page from normal memory to secure memory.
 *
 * The H_PAGE_IN_SHARED flag makes the page shared, which means that the
 * same memory is visible to both UV and HV.
 */
unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
		unsigned long flags,
		unsigned long page_shift)
{
	unsigned long start, end;
	struct vm_area_struct *vma;
	int srcu_idx;
	unsigned long gfn = gpa >> page_shift;
	int ret;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (page_shift != PAGE_SHIFT)
		return H_P3;

	if (flags & ~H_PAGE_IN_SHARED)
		return H_P2;

	if (flags & H_PAGE_IN_SHARED)
		return kvmppc_share_page(kvm, gpa, page_shift);

	ret = H_PARAMETER;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(kvm->mm);

	start = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(start))
		goto out;

	mutex_lock(&kvm->arch.uvmem_lock);
	/* Fail the page-in request of an already paged-in page */
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
		goto out_unlock;

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out_unlock;

	if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
				true))
		goto out_unlock;

	ret = H_SUCCESS;

out_unlock:
	mutex_unlock(&kvm->arch.uvmem_lock);
out:
	mmap_read_unlock(kvm->mm);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * Fault handler callback that gets called when HV touches any page that
 * has been moved to secure memory. We ask UV to give back the page by
 * issuing a UV_PAGE_OUT uvcall.
 *
 * This eventually results in dropping of the device PFN, and the newly
 * provisioned page/PFN gets populated in QEMU's page tables.
 */
static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
{
	struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;

	if (kvmppc_svm_page_out(vmf->vma, vmf->address,
				vmf->address + PAGE_SIZE, PAGE_SHIFT,
				pvt->kvm, pvt->gpa, vmf->page))
		return VM_FAULT_SIGBUS;
	else
		return 0;
}

/*
 * Release the device PFN back to the pool
 *
 * Gets called when a secure GFN transitions from a secure-PFN
 * to a normal PFN during H_SVM_PAGE_OUT.
 * Gets called with kvm->arch.uvmem_lock held.
 */
static void kvmppc_uvmem_page_free(struct page *page)
{
	unsigned long pfn = page_to_pfn(page) -
			(kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
	struct kvmppc_uvmem_page_pvt *pvt;

	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);
	spin_unlock(&kvmppc_uvmem_bitmap_lock);

	pvt = page->zone_device_data;
	page->zone_device_data = NULL;
	if (pvt->remove_gfn)
		kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
	else
		kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
	kfree(pvt);
}

static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
	.page_free = kvmppc_uvmem_page_free,
	.migrate_to_ram	= kvmppc_uvmem_migrate_to_ram,
};

/*
 * H_SVM_PAGE_OUT: Move page from secure memory to normal memory.
 */
unsigned long
kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
		      unsigned long flags, unsigned long page_shift)
{
	unsigned long gfn = gpa >> page_shift;
	unsigned long start, end;
	struct vm_area_struct *vma;
	int srcu_idx;
	int ret;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (page_shift != PAGE_SHIFT)
		return H_P3;

	if (flags)
		return H_P2;

	ret = H_PARAMETER;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(kvm->mm);
	start = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(start))
		goto out;

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out;

	if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa, NULL))
		ret = H_SUCCESS;
out:
	mmap_read_unlock(kvm->mm);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
{
	unsigned long pfn;
	int ret = U_SUCCESS;

	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
		goto out;

	ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
			 0, PAGE_SHIFT);
out:
	kvm_release_pfn_clean(pfn);
	mutex_unlock(&kvm->arch.uvmem_lock);
	return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
}

int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new)
{
	int ret = __kvmppc_uvmem_memslot_create(kvm, new);

	if (!ret)
		ret = kvmppc_uv_migrate_mem_slot(kvm, new);

	return ret;
}

void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old)
{
	__kvmppc_uvmem_memslot_delete(kvm, old);
}

static u64 kvmppc_get_secmem_size(void)
{
	struct device_node *np;
	int i, len;
	const __be32 *prop;
	u64 size = 0;

	/*
	 * First try the new ibm,secure-memory nodes, which supersede the
	 * secure-memory-ranges property.
	 * If we find some, there is no need to read the deprecated ones.
	 */
	for_each_compatible_node(np, NULL, "ibm,secure-memory") {
		prop = of_get_property(np, "reg", &len);
		if (!prop)
			continue;
		size += of_read_number(prop + 2, 2);
	}
	if (size)
		return size;

	np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
	if (!np)
		goto out;

	prop = of_get_property(np, "secure-memory-ranges", &len);
	if (!prop)
		goto out_put;

	for (i = 0; i < len / (sizeof(*prop) * 4); i++)
		size += of_read_number(prop + (i * 4) + 2, 2);

out_put:
	of_node_put(np);
out:
	return size;
}

int kvmppc_uvmem_init(void)
{
	int ret = 0;
	unsigned long size;
	struct resource *res;
	void *addr;
	unsigned long pfn_last, pfn_first;

	size = kvmppc_get_secmem_size();
	if (!size) {
		/*
		 * Don't fail the initialization of the kvm-hv module if
		 * the platform doesn't export the ibm,uv-firmware node.
		 * Let normal guests run on such a PEF-disabled platform.
		 */
		pr_info("KVMPPC-UVMEM: No support for secure guests\n");
		goto out;
	}

	res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem");
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto out;
	}

	kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
	kvmppc_uvmem_pgmap.range.start = res->start;
	kvmppc_uvmem_pgmap.range.end = res->end;
	kvmppc_uvmem_pgmap.nr_range = 1;
	kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
	/* just one global instance: */
	kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
	addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto out_free_region;
	}

	pfn_first = res->start >> PAGE_SHIFT;
	pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
	kvmppc_uvmem_bitmap = bitmap_zalloc(pfn_last - pfn_first, GFP_KERNEL);
	if (!kvmppc_uvmem_bitmap) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size);
	return ret;
out_unmap:
	memunmap_pages(&kvmppc_uvmem_pgmap);
out_free_region:
	release_mem_region(res->start, size);
out:
	return ret;
}

void kvmppc_uvmem_free(void)
{
	if (!kvmppc_uvmem_bitmap)
		return;

	memunmap_pages(&kvmppc_uvmem_pgmap);
	release_mem_region(kvmppc_uvmem_pgmap.range.start,
			   range_len(&kvmppc_uvmem_pgmap.range));
	bitmap_free(kvmppc_uvmem_bitmap);
}