// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for IOMMU drivers implementing SVA
 */
#include <linux/mmu_context.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>

#include "iommu-priv.h"

static DEFINE_MUTEX(iommu_sva_lock);

/* Allocate the per-mm SVA data and a global PASID for the mm, if not already done */
static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev)
{
	struct iommu_mm_data *iommu_mm;
	ioasid_t pasid;

	lockdep_assert_held(&iommu_sva_lock);

	if (!arch_pgtable_dma_compat(mm))
		return ERR_PTR(-EBUSY);

	iommu_mm = mm->iommu_mm;
	/* Is a PASID already associated with this mm? */
	if (iommu_mm) {
		if (iommu_mm->pasid >= dev->iommu->max_pasids)
			return ERR_PTR(-EOVERFLOW);
		return iommu_mm;
	}

	iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
	if (!iommu_mm)
		return ERR_PTR(-ENOMEM);

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID) {
		kfree(iommu_mm);
		return ERR_PTR(-ENOSPC);
	}
	iommu_mm->pasid = pasid;
	INIT_LIST_HEAD(&iommu_mm->sva_domains);
	INIT_LIST_HEAD(&iommu_mm->sva_handles);
	/*
	 * Make sure the write to mm->iommu_mm is not reordered ahead of the
	 * initialization of the iommu_mm fields. Otherwise, readers may see
	 * a valid iommu_mm pointer with uninitialized values behind it.
	 */
	smp_store_release(&mm->iommu_mm, iommu_mm);
	return iommu_mm;
}
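
/*
 * Illustrative sketch, not used by this file: a reader that dereferences
 * mm->iommu_mm without holding iommu_sva_lock would pair the
 * smp_store_release() above with an acquire load, along the lines of:
 *
 *	struct iommu_mm_data *iommu_mm = smp_load_acquire(&mm->iommu_mm);
 *
 *	if (iommu_mm)
 *		use_pasid(iommu_mm->pasid);	// use_pasid() is hypothetical
 */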

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to mm_users
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
 * bond already exists between @dev and @mm, an additional internal
 * reference is taken. The caller must call iommu_sva_unbind_device()
 * once for each successful bind to release the references.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm;
	struct iommu_domain *domain;
	struct iommu_sva *handle;
	int ret;

	mutex_lock(&iommu_sva_lock);

	/* Allocate the per-mm PASID data (mm->iommu_mm) if necessary. */
	iommu_mm = iommu_alloc_mm_data(mm, dev);
	if (IS_ERR(iommu_mm)) {
		ret = PTR_ERR(iommu_mm);
		goto out_unlock;
	}

	list_for_each_entry(handle, &mm->iommu_mm->sva_handles, handle_item) {
		if (handle->dev == dev) {
			refcount_inc(&handle->users);
			mutex_unlock(&iommu_sva_lock);
			return handle;
		}
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Search for an existing domain. */
	list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
		ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
		if (!ret) {
			domain->users++;
			goto out;
		}
	}

	/* Allocate a new domain and set it on device pasid. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (!domain) {
		ret = -ENOMEM;
		goto out_free_handle;
	}

	ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
	if (ret)
		goto out_free_domain;
	domain->users = 1;
	list_add(&domain->next, &mm->iommu_mm->sva_domains);

out:
	refcount_set(&handle->users, 1);
	list_add(&handle->handle_item, &mm->iommu_mm->sva_handles);
	mutex_unlock(&iommu_sva_lock);
	handle->dev = dev;
	handle->domain = domain;
	return handle;

out_free_domain:
	iommu_domain_free(domain);
out_free_handle:
	kfree(handle);
out_unlock:
	mutex_unlock(&iommu_sva_lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
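
/*
 * Example usage, as an illustrative sketch only (not part of this file's
 * build): a driver binding the current process for SVA might do roughly
 * the following; program_device_pasid() is a hypothetical stand-in for
 * whatever device-specific step installs the PASID in the hardware.
 *
 *	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	if (ret)
 *		return ret;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 *	pasid = iommu_sva_get_pasid(handle);
 *	program_device_pasid(dev, pasid);
 *	...
 *	iommu_sva_unbind_device(handle);
 */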

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put a reference to a bond between device and address space. The device
 * should not be issuing any more transactions for this PASID. All outstanding
 * page requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->domain;
	struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
	struct device *dev = handle->dev;

	mutex_lock(&iommu_sva_lock);
	if (!refcount_dec_and_test(&handle->users)) {
		mutex_unlock(&iommu_sva_lock);
		return;
	}
	list_del(&handle->handle_item);

	iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
	if (--domain->users == 0) {
		list_del(&domain->next);
		iommu_domain_free(domain);
	}
	mutex_unlock(&iommu_sva_lock);
	kfree(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

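/**
 * iommu_sva_get_pasid() - Get the PASID backing a bond
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Returns the PASID value the device uses in transactions that target the
 * bound address space.
 */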
u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->domain;

	return mm_get_enqcmd_pasid(domain->mm);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);

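/*
 * Called by the core mm code once the mm_struct goes away: release the
 * global PASID and the per-mm SVA data allocated in iommu_alloc_mm_data().
 */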
void mm_pasid_drop(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = mm->iommu_mm;

	if (!iommu_mm)
		return;

	iommu_free_global_pasid(iommu_mm->pasid);
	kfree(iommu_mm);
}

/*
 * I/O page fault handler for SVA
 */
static enum iommu_page_response_code
iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
{
	vm_fault_t ret;
	struct vm_area_struct *vma;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = vma_lookup(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}

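/*
 * Work function for deferred I/O page fault handling: resolve each page
 * request in the group by faulting the address into the bound mm, then send
 * a single response for the whole group and free it.
 */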
static void iommu_sva_handle_iopf(struct work_struct *work)
{
	struct iopf_fault *iopf;
	struct iopf_group *group;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	group = container_of(work, struct iopf_group, work);
	list_for_each_entry(iopf, &group->faults, list) {
		/*
		 * For the moment, errors are sticky: don't handle subsequent
		 * faults in the group if there is an error.
		 */
		if (status != IOMMU_PAGE_RESP_SUCCESS)
			break;

		status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm);
	}

	iopf_group_response(group, status);
	iopf_free_group(group);
}

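/*
 * Domain fault handler: handle_mm_fault() may sleep, so instead of resolving
 * the fault here, hand the group off to the IOPF queue's workqueue.
 */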
static int iommu_sva_iopf_handler(struct iopf_group *group)
{
	struct iommu_fault_param *fault_param = group->fault_param;

	INIT_WORK(&group->work, iommu_sva_handle_iopf);
	if (!queue_work(fault_param->queue->wq, &group->work))
		return -EBUSY;

	return 0;
}

struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
					    struct mm_struct *mm)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
	if (!domain)
		return NULL;

	domain->type = IOMMU_DOMAIN_SVA;
	mmgrab(mm);
	domain->mm = mm;
	domain->owner = ops;
	domain->iopf_handler = iommu_sva_iopf_handler;

	return domain;
}
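
/*
 * Illustrative sketch (hypothetical my_* names, not a real driver): the
 * ops->domain_alloc() callback used above is expected to recognize the
 * IOMMU_DOMAIN_SVA request and hand back a domain whose ops provide the
 * set_dev_pasid() and free() callbacks that the attach and free paths use:
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned int type)
 *	{
 *		struct my_domain *d;
 *
 *		if (type != IOMMU_DOMAIN_SVA)
 *			return NULL;		// other domain types elided
 *
 *		d = kzalloc(sizeof(*d), GFP_KERNEL);
 *		if (!d)
 *			return NULL;
 *		d->domain.ops = &my_sva_domain_ops;
 *		return &d->domain;
 *	}
 */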