/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2013 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <rdma/ib_verbs.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"

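/*
 * Maximum number of page_list entries per chunk: the space left in a
 * PAGE_SIZE allocation after the chunk header, divided by the size of
 * one scatterlist entry.
 */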
#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list))	/\
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

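/*
 * Fault handler for the PD's IOMMU domain: log the faulting device,
 * domain, IOVA, and flags.  Returning an error tells the IOMMU layer
 * the fault was not handled here.
 */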
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}

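/*
 * Unpin and free every chunk on @chunk_list.  When @dirty is set, the
 * pages are marked dirty before being unpinned, since the hardware may
 * have written to them.
 */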
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			unpin_user_pages_dirty_lock(&page, 1, dirty);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}

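/*
 * Pin the user pages backing [addr, addr + size) and record them as
 * scatterlist chunks on uiomr->chunk_list.  The pages are charged
 * against RLIMIT_MEMLOCK, and on success a reference is taken on the
 * owning mm.
 */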
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct usnic_uiom_reg *uiomr)
{
	struct list_head *chunk_list = &uiomr->chunk_list;
	unsigned int gup_flags = FOLL_LONGTERM;
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	dma_addr_t pa;
	struct mm_struct *mm;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return an error.
	 */
	if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size))
		return -EINVAL;

	if (!size)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	uiomr->owning_mm = mm = current->mm;
	mmap_read_lock(mm);

	locked = atomic64_add_return(npages, &current->mm->pinned_vm);
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	if (writable)
		gup_flags |= FOLL_WRITE;
	cur_base = addr & PAGE_MASK;
	ret = 0;

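	/*
	 * Pin up to a page worth of struct page pointers per iteration,
	 * then pack the results into chunks small enough to fit in a
	 * single page-sized allocation.
	 */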
	while (npages) {
		ret = pin_user_pages(cur_base,
				     min_t(unsigned long, npages,
				     PAGE_SIZE / sizeof(struct page *)),
				     gup_flags, page_list);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(struct_size(chunk, page_list,
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK)),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0) {
		usnic_uiom_put_pages(chunk_list, 0);
		atomic64_sub(npages, &current->mm->pinned_vm);
	} else
		mmgrab(uiomr->owning_mm);

	mmap_read_unlock(mm);
	free_page((unsigned long) page_list);
	return ret;
}

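/*
 * Unmap every page spanned by the intervals on @intervals from the
 * PD's IOMMU domain, one page at a time.
 */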
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	unsigned long va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401: unmap one page at a time */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}

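/*
 * Tear down a registration: remove its intervals from the PD, unmap
 * them from the IOMMU, and unpin the pages.  The pages are marked
 * dirty only if @dirty is set and at least one removed interval was
 * writable.
 */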
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}

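/*
 * Walk the pinned chunks alongside the sorted interval list and map
 * each interval into the PD's IOMMU domain.  Physically contiguous
 * runs of pages are coalesced into a single iommu_map() call; a new
 * run starts whenever contiguity breaks or an interval ends.
 */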
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
									list);
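
	/*
	 * Chunk entries and intervals are both sorted by virtual
	 * address, so one forward walk over the chunks covers every
	 * interval in turn.
	 */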
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags, GFP_ATOMIC);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags, GFP_ATOMIC);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit the last entry of the chunk,
			 * so advance to the next chunk.
			 */
			chunk = list_first_entry(&chunk->list,
							struct usnic_uiom_chunk,
							list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}

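/*
 * usnic_uiom_reg_get() - pin and IOMMU-map a userspace memory region
 * @pd: protection domain to map the region into
 * @addr: userspace virtual address of the region
 * @size: length of the region in bytes
 * @writable: effectively ignored; all entries are made writable (see
 *	the comment below)
 * @dmasync: currently unused
 *
 * Returns the new registration on success, or an ERR_PTR() on failure.
 */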
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * Intel IOMMU map throws an error if a translation entry is
	 * changed from read to write.  This module may not unmap and
	 * then remap the entry after fixing the permission, because
	 * that would open a small window where hardware DMA may page
	 * fault.  Hence, make all entries writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
				   uiomr);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
	mmdrop(uiomr->owning_mm);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}

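/* Drop the mm reference taken at registration and free the registration. */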
static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
{
	mmdrop(uiomr->owning_mm);
	kfree(uiomr);
}

static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
{
	return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
}

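/*
 * Release a registration: unmap and unpin its pages (allowing them to
 * be marked dirty) and uncharge them from the owning mm's pinned_vm.
 */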
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr)
{
	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
	__usnic_uiom_release_tail(uiomr);
}

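/*
 * Allocate a protection domain backed by a new IOMMU domain on the
 * device's bus, and install the fault handler.
 */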
struct usnic_uiom_pd *usnic_uiom_alloc_pd(struct device *dev)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_domain_alloc(dev->bus);
	if (!domain) {
		usnic_err("Failed to allocate IOMMU domain\n");
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}

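/*
 * Attach @dev to the PD's IOMMU domain.  The device's IOMMU must
 * support cache coherency, since all mappings are created with
 * IOMMU_CACHE.
 */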
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}

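/*
 * Remove @dev from the PD's device list and detach it from the IOMMU
 * domain.  Logs an error if the device was never attached.
 */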
void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	iommu_detach_device(pd->domain, dev);
}

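/*
 * Return a NULL-terminated array of the devices attached to @pd.  The
 * caller frees it with usnic_uiom_free_dev_list().
 */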
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link)
		devs[i++] = uiom_dev->dev;
out:
	spin_unlock(&pd->lock);
	return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}