// SPDX-License-Identifier: GPL-2.0
/*  Copyright(c) 2016-20 Intel Corporation. */

#include <linux/acpi.h>
#include <linux/miscdevice.h>
#include <linux/mman.h>
#include <linux/security.h>
#include <linux/suspend.h>
#include <asm/traps.h>
#include "driver.h"
#include "encl.h"

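/*
 * Masks of the MISCSELECT, ATTRIBUTES and XFRM bits that are reserved on this
 * system. They are derived from CPUID by sgx_drv_init() and used when
 * validating the SECS passed to enclave creation.
 */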
u64 sgx_attributes_reserved_mask;
u64 sgx_xfrm_reserved_mask = ~0x3;
u32 sgx_misc_reserved_mask;

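/*
 * Allocate and initialize a new enclave instance and tie it to the file so
 * that subsequent ioctl() and mmap() calls operate on that enclave.
 */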
static int sgx_open(struct inode *inode, struct file *file)
{
	struct sgx_encl *encl;
	int ret;

	encl = kzalloc(sizeof(*encl), GFP_KERNEL);
	if (!encl)
		return -ENOMEM;

	kref_init(&encl->refcount);
	xa_init(&encl->page_array);
	mutex_init(&encl->lock);
	INIT_LIST_HEAD(&encl->va_pages);
	INIT_LIST_HEAD(&encl->mm_list);
	spin_lock_init(&encl->mm_lock);

	ret = init_srcu_struct(&encl->srcu);
	if (ret) {
		kfree(encl);
		return ret;
	}

	file->private_data = encl;

	return 0;
}

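/*
 * Called when the last reference to the enclave file is dropped: detach the
 * enclave from any address spaces that still reference it and put the file's
 * reference to the enclave.
 */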
static int sgx_release(struct inode *inode, struct file *file)
{
	struct sgx_encl *encl = file->private_data;
	struct sgx_encl_mm *encl_mm;

	/*
	 * Drain the remaining mm_list entries. At this point the list contains
	 * entries for processes that have closed the enclave file but have not
	 * exited yet. Processes that have exited have already been removed
	 * from the list by sgx_mmu_notifier_release().
	 */
	for ( ; ; ) {
		spin_lock(&encl->mm_lock);

		if (list_empty(&encl->mm_list)) {
			encl_mm = NULL;
		} else {
			encl_mm = list_first_entry(&encl->mm_list,
						   struct sgx_encl_mm, list);
			list_del_rcu(&encl_mm->list);
		}

		spin_unlock(&encl->mm_lock);

		/* The enclave is no longer mapped by any mm. */
		if (!encl_mm)
			break;

		synchronize_srcu(&encl->srcu);
		mmu_notifier_unregister(&encl_mm->mmu_notifier, encl_mm->mm);
		kfree(encl_mm);

		/* 'encl_mm' is gone, put encl_mm->encl reference: */
		kref_put(&encl->refcount, sgx_encl_release);
	}

	kref_put(&encl->refcount, sgx_encl_release);
	return 0;
}

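/*
 * Verify that the requested protection bits are allowed for the enclave pages
 * in the range, register the caller's mm with the enclave, and install
 * sgx_vm_ops so that faults on the VMA are serviced from enclave memory.
 */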
static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct sgx_encl *encl = file->private_data;
	int ret;

	ret = sgx_encl_may_map(encl, vma->vm_start, vma->vm_end, vma->vm_flags);
	if (ret)
		return ret;

	ret = sgx_encl_mm_add(encl, vma->vm_mm);
	if (ret)
		return ret;

	vma->vm_ops = &sgx_vm_ops;
	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
	vma->vm_private_data = encl;

	return 0;
}

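/*
 * Enclave memory can only be mapped MAP_SHARED. Honor MAP_FIXED requests
 * as-is and defer everything else to the mm's default placement.
 */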
static unsigned long sgx_get_unmapped_area(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if ((flags & MAP_TYPE) == MAP_PRIVATE)
		return -EINVAL;

	if (flags & MAP_FIXED)
		return addr;

	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}

#ifdef CONFIG_COMPAT
static long sgx_compat_ioctl(struct file *filep, unsigned int cmd,
			     unsigned long arg)
{
	return sgx_ioctl(filep, cmd, arg);
}
#endif

static const struct file_operations sgx_encl_fops = {
	.owner			= THIS_MODULE,
	.open			= sgx_open,
	.release		= sgx_release,
	.unlocked_ioctl		= sgx_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= sgx_compat_ioctl,
#endif
	.mmap			= sgx_mmap,
	.get_unmapped_area	= sgx_get_unmapped_area,
};

static struct miscdevice sgx_dev_enclave = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sgx_enclave",
	.nodename = "sgx_enclave",
	.fops = &sgx_encl_fops,
};

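/*
 * Probe SGX support with CPUID, derive the reserved bit masks used to
 * validate enclave attributes, and register the /dev/sgx_enclave device.
 */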
int __init sgx_drv_init(void)
{
	unsigned int eax, ebx, ecx, edx;
	u64 attr_mask;
	u64 xfrm_mask;
	int ret;

	if (!cpu_feature_enabled(X86_FEATURE_SGX_LC))
		return -ENODEV;

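	/*
	 * Sub-leaf 0 of the SGX CPUID leaf: EAX enumerates SGX1/SGX2 support,
	 * EBX the supported MISCSELECT bits.
	 */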
	cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx);

	if (!(eax & 1)) {
		pr_err("SGX disabled: SGX1 instruction support not available.\n");
		return -ENODEV;
	}

	sgx_misc_reserved_mask = ~ebx | SGX_MISC_RESERVED_MASK;

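	/*
	 * Sub-leaf 1: EBX:EAX enumerate the valid SECS.ATTRIBUTES bits and
	 * EDX:ECX the valid XFRM bits.
	 */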
	cpuid_count(SGX_CPUID, 1, &eax, &ebx, &ecx, &edx);

	attr_mask = (((u64)ebx) << 32) + (u64)eax;
	sgx_attributes_reserved_mask = ~attr_mask | SGX_ATTR_RESERVED_MASK;

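	/*
	 * Only honor the enumerated XFRM bits when OSXSAVE is enabled;
	 * otherwise the default mask permits only x87 and SSE state.
	 */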
	if (cpu_feature_enabled(X86_FEATURE_OSXSAVE)) {
		xfrm_mask = (((u64)edx) << 32) + (u64)ecx;
		sgx_xfrm_reserved_mask = ~xfrm_mask;
	}

	ret = misc_register(&sgx_dev_enclave);
	if (ret)
		return ret;

	return 0;
}