// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

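/* Unmap and free the SG table and release the shmem backing pages, if present. */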
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

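/*
 * Called with etnaviv_obj->lock held.  Populates the backing pages and the
 * device-mapped SG table on first use.
 */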
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		unsigned int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(dev, etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

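/*
 * Set up the userspace mapping according to the BO's caching mode:
 * write-combined, uncached, or (for cached BOs) backed by the object's
 * shmem file so unmap_mapping_range() works as expected.
 */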
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, etnaviv_obj->base.filp);

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

static int etnaviv_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
}

static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vmf_insert_pfn() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     pfn, pfn << PAGE_SHIFT);

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

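/*
 * Look up an existing mapping of the object in the given MMU context,
 * or return NULL if the object is not mapped there.
 */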
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}

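/*
 * Find or create a mapping of the object in the given MMU context at the
 * requested virtual address.  On success the mapping's use count is raised
 * and a reference is taken on the GEM object.
 */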
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context) {
				if (va && mapping->iova != va) {
					etnaviv_iommu_reap_mapping(mapping);
					mapping = NULL;
				} else {
					mapping->use += 1;
				}
			} else {
				mapping = NULL;
			}
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

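/* called with obj->lock held */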
static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

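/* Map an ETNA_PREP_* operation mask onto the matching DMA data direction. */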
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

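/*
 * Prepare the object for CPU access: wait for GPU access to finish (unless
 * ETNA_PREP_NOSYNC is set) and, for cached BOs, sync the pages for the CPU.
 */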
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled(obj->resv,
					    dma_resv_usage_rw(write)))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
					    true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
					 etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

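/*
 * Finish CPU access: for cached BOs, hand the pages back to the device using
 * the direction recorded by the matching cpu_prep call.
 */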
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	unsigned long off = drm_vma_node_start(&obj->vma_node);
	int r;

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	r = dma_resv_lock(robj, NULL);
	if (r)
		return;

	dma_resv_describe(robj, m);
	dma_resv_unlock(robj);
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context)
			etnaviv_iommu_unmap_gem(context, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
	.free = etnaviv_gem_free_object,
	.pin = etnaviv_gem_prime_pin,
	.unpin = etnaviv_gem_prime_unpin,
	.get_sg_table = etnaviv_gem_prime_get_sg_table,
	.vmap = etnaviv_gem_prime_vmap,
	.mmap = etnaviv_gem_mmap,
	.vm_ops = &vm_ops,
};

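/*
 * Common allocation path: validate the caching flags and initialise the
 * shared etnaviv GEM object state (lock, VRAM mapping list, object funcs).
 */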
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;
	(*obj)->funcs = &etnaviv_gem_object_funcs;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, flags, &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

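/*
 * Pin the user pages backing a userptr BO.  The pages are pinned with
 * FOLL_LONGTERM and must belong to the mm which created the BO.
 */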
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	unsigned int gup_flags = FOLL_LONGTERM;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!userptr->ro)
		gup_flags |= FOLL_WRITE;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages, gup_flags, pages);
		if (ret < 0) {
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);
	return ret;
}