// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
/*
 * This file contains functions for buffer object structure management
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gfp.h>		/* for GFP_ATOMIC */
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/slab.h>		/* for kmalloc */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <asm/current.h>
#include <linux/sched/signal.h>
#include <linux/file.h>

#include <asm/set_memory.h>

#include "atomisp_internal.h"
#include "hmm/hmm_common.h"
#include "hmm/hmm_bo.h"

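/*
 * Initialize @bo so that it describes @pgnr pages of ISP virtual address
 * space.  bo->start is initialized to the device-wide start address;
 * callers that carve a smaller bo out of an existing one (see
 * __bo_break_up()) adjust start/end afterwards.
 */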
static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
		     unsigned int pgnr)
{
	check_bodev_null_return(bdev, -EINVAL);
	var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
			 "hmm_bo_device not inited yet.\n");
	/* prevent zero size buffer object */
	if (pgnr == 0) {
		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
		return -EINVAL;
	}

	memset(bo, 0, sizeof(*bo));
	mutex_init(&bo->mutex);

	/* init the bo->list HEAD as an element of entire_bo_list */
	INIT_LIST_HEAD(&bo->list);

	bo->bdev = bdev;
	bo->vmap_addr = NULL;
	bo->status = HMM_BO_FREE;
	bo->start = bdev->start;
	bo->pgnr = pgnr;
	bo->end = bo->start + pgnr_to_size(pgnr);
	bo->prev = NULL;
	bo->next = NULL;

	return 0;
}

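/*
 * Find a free bo of at least @pgnr pages and take it off the free rbtree.
 * The tree is keyed on pgnr; an exact-size match is preferred, otherwise
 * the smallest free bo that is still big enough is returned and the
 * caller splits it with __bo_break_up().
 */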
static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
    struct rb_node *node, unsigned int pgnr)
{
	struct hmm_buffer_object *this, *ret_bo, *temp_bo;

	this = rb_entry(node, struct hmm_buffer_object, node);
	if (this->pgnr == pgnr ||
	    (this->pgnr > pgnr && !this->node.rb_left)) {
		goto remove_bo_and_return;
	} else {
		if (this->pgnr < pgnr) {
			if (!this->node.rb_right)
				return NULL;
			ret_bo = __bo_search_and_remove_from_free_rbtree(
				     this->node.rb_right, pgnr);
		} else {
			ret_bo = __bo_search_and_remove_from_free_rbtree(
				     this->node.rb_left, pgnr);
		}
		if (!ret_bo) {
			if (this->pgnr > pgnr)
				goto remove_bo_and_return;
			else
				return NULL;
		}
		return ret_bo;
	}

remove_bo_and_return:
	/* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
	 * 1. check if 'this->next' is NULL:
	 *	yes: erase 'this' node and rebalance rbtree, return 'this'.
	 */
	if (!this->next) {
		rb_erase(&this->node, &this->bdev->free_rbtree);
		return this;
	}
	/* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
	 * 2. check if 'this->next->next' is NULL:
	 *	yes: change the related 'next/prev' pointer,
	 *		return 'this->next' but the rbtree stays unchanged.
	 */
	temp_bo = this->next;
	this->next = temp_bo->next;
	if (temp_bo->next)
		temp_bo->next->prev = this;
	temp_bo->next = NULL;
	temp_bo->prev = NULL;
	return temp_bo;
}

static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
	ia_css_ptr start)
{
	struct rb_node *n = root->rb_node;
	struct hmm_buffer_object *bo;

	do {
		bo = rb_entry(n, struct hmm_buffer_object, node);

		if (bo->start > start) {
			if (!n->rb_left)
				return NULL;
			n = n->rb_left;
		} else if (bo->start < start) {
			if (!n->rb_right)
				return NULL;
			n = n->rb_right;
		} else {
			return bo;
		}
	} while (n);

	return NULL;
}

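/*
 * Like __bo_search_by_addr(), but @start does not have to be the exact
 * start address of a bo: any address in [bo->start, bo->end) matches.
 */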
static struct hmm_buffer_object *__bo_search_by_addr_in_range(
    struct rb_root *root, unsigned int start)
{
	struct rb_node *n = root->rb_node;
	struct hmm_buffer_object *bo;

	do {
		bo = rb_entry(n, struct hmm_buffer_object, node);

		if (bo->start > start) {
			if (!n->rb_left)
				return NULL;
			n = n->rb_left;
		} else {
			if (bo->end > start)
				return bo;
			if (!n->rb_right)
				return NULL;
			n = n->rb_right;
		}
	} while (n);

	return NULL;
}

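/*
 * Insert @bo into the free rbtree.  The tree is keyed on pgnr and holds
 * at most one node per size; further free bos of the same size are
 * chained onto that node via their prev/next pointers instead of being
 * linked into the tree.
 */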
static void __bo_insert_to_free_rbtree(struct rb_root *root,
				       struct hmm_buffer_object *bo)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hmm_buffer_object *this;
	unsigned int pgnr = bo->pgnr;

	while (*new) {
		parent = *new;
		this = container_of(*new, struct hmm_buffer_object, node);

		if (pgnr < this->pgnr) {
			new = &((*new)->rb_left);
		} else if (pgnr > this->pgnr) {
			new = &((*new)->rb_right);
		} else {
			bo->prev = this;
			bo->next = this->next;
			if (this->next)
				this->next->prev = bo;
			this->next = bo;
			bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
			return;
		}
	}

	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;

	rb_link_node(&bo->node, parent, new);
	rb_insert_color(&bo->node, root);
}

static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
					struct hmm_buffer_object *bo)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hmm_buffer_object *this;
	unsigned int start = bo->start;

	while (*new) {
		parent = *new;
		this = container_of(*new, struct hmm_buffer_object, node);

		if (start < this->start)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	kref_init(&bo->kref);
	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;

	rb_link_node(&bo->node, parent, new);
	rb_insert_color(&bo->node, root);
}

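/*
 * Split @bo: carve a new bo of @pgnr pages off its head and shrink @bo
 * accordingly.  The new bo is linked into entire_bo_list just before @bo
 * so the list stays sorted by start address.
 */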
static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
	struct hmm_buffer_object *bo,
	unsigned int pgnr)
{
	struct hmm_buffer_object *new_bo;
	unsigned long flags;
	int ret;

	new_bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
	if (!new_bo) {
		dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
		return NULL;
	}
	ret = __bo_init(bdev, new_bo, pgnr);
	if (ret) {
		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
		kmem_cache_free(bdev->bo_cache, new_bo);
		return NULL;
	}

	new_bo->start = bo->start;
	new_bo->end = new_bo->start + pgnr_to_size(pgnr);
	bo->start = new_bo->end;
	bo->pgnr = bo->pgnr - pgnr;

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_add_tail(&new_bo->list, &bo->list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	return new_bo;
}

static void __bo_take_off_handling(struct hmm_buffer_object *bo)
{
	struct hmm_bo_device *bdev = bo->bdev;
	/* There are 4 situations when we take off a known bo from the free rbtree:
	 * 1. if bo->prev == NULL && bo->next == NULL, bo is a rbtree node
	 *	and has no linked list after it; to take off this bo we just
	 *	need to erase bo directly and rebalance the free rbtree
	 */
	if (!bo->prev && !bo->next) {
		rb_erase(&bo->node, &bdev->free_rbtree);
		/* 2. when bo->next != NULL && bo->prev == NULL, bo is a rbtree node
		 *	and has a linked list after it; to take off this bo we need
		 *	to erase bo first, then insert bo->next into the free rbtree
		 *	and rebalance the free rbtree
		 */
	} else if (!bo->prev && bo->next) {
		bo->next->prev = NULL;
		rb_erase(&bo->node, &bdev->free_rbtree);
		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
		bo->next = NULL;
		/* 3. when bo->prev != NULL && bo->next == NULL, bo is not a rbtree
		 *	node; bo is the last element of the linked list after the
		 *	rbtree node; to take off this bo we just need to set the
		 *	"prev/next" pointers to NULL, the free rbtree stays unchanged
		 */
	} else if (bo->prev && !bo->next) {
		bo->prev->next = NULL;
		bo->prev = NULL;
		/* 4. when bo->prev != NULL && bo->next != NULL, bo is not a rbtree
		 *	node; bo is in the middle of the linked list after the rbtree
		 *	node; to take off this bo we re-link its neighbours and then
		 *	clear its "prev/next" pointers, the free rbtree stays unchanged
		 */
	} else if (bo->prev && bo->next) {
		bo->next->prev = bo->prev;
		bo->prev->next = bo->next;
		bo->next = NULL;
		bo->prev = NULL;
	}
}

static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
	struct hmm_buffer_object *next_bo)
{
	struct hmm_bo_device *bdev;
	unsigned long flags;

	bdev = bo->bdev;
	next_bo->start = bo->start;
	next_bo->pgnr = next_bo->pgnr + bo->pgnr;

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_del(&bo->list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	kmem_cache_free(bo->bdev->bo_cache, bo);

	return next_bo;
}

/*
 * hmm_bo_device functions.
 */
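/*
 * Rough lifecycle of a buffer object, as implied by the functions below
 * (a hypothetical call sequence, not copied from a specific caller).
 * The device itself is set up once with hmm_bo_device_init() and torn
 * down with hmm_bo_device_exit(); each buffer then goes through roughly:
 *
 *	bo = hmm_bo_alloc(bdev, pgnr);		reserve ISP virtual space
 *	hmm_bo_alloc_pages(bo, type, vmalloc_addr);	back it with pages
 *	hmm_bo_bind(bo);			map the pages in the ISP MMU
 *	vaddr = hmm_bo_vmap(bo, cached);	optional kernel mapping
 *	...
 *	hmm_bo_vunmap(bo);
 *	hmm_bo_unbind(bo);
 *	hmm_bo_free_pages(bo);
 *	hmm_bo_unref(bo);			drops the kref, frees the range
 */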
int hmm_bo_device_init(struct hmm_bo_device *bdev,
		       struct isp_mmu_client *mmu_driver,
		       unsigned int vaddr_start,
		       unsigned int size)
{
	struct hmm_buffer_object *bo;
	unsigned long flags;
	int ret;

	check_bodev_null_return(bdev, -EINVAL);

	ret = isp_mmu_init(&bdev->mmu, mmu_driver);
	if (ret) {
		dev_err(atomisp_dev, "isp_mmu_init failed.\n");
		return ret;
	}

	bdev->start = vaddr_start;
	bdev->pgnr = size_to_pgnr_ceil(size);
	bdev->size = pgnr_to_size(bdev->pgnr);

	spin_lock_init(&bdev->list_lock);
	mutex_init(&bdev->rbtree_mutex);

	bdev->flag = HMM_BO_DEVICE_INITED;

	INIT_LIST_HEAD(&bdev->entire_bo_list);
	bdev->allocated_rbtree = RB_ROOT;
	bdev->free_rbtree = RB_ROOT;

	bdev->bo_cache = kmem_cache_create("bo_cache",
					   sizeof(struct hmm_buffer_object), 0, 0, NULL);
	if (!bdev->bo_cache) {
		dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
		isp_mmu_exit(&bdev->mmu);
		return -ENOMEM;
	}

	bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
	if (!bo) {
		dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
		isp_mmu_exit(&bdev->mmu);
		return -ENOMEM;
	}

	ret = __bo_init(bdev, bo, bdev->pgnr);
	if (ret) {
		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
		kmem_cache_free(bdev->bo_cache, bo);
		isp_mmu_exit(&bdev->mmu);
		return -EINVAL;
	}

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_add_tail(&bo->list, &bdev->entire_bo_list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

	return 0;
}

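/*
 * Allocate a buffer object covering @pgnr pages of ISP virtual address
 * space.  The returned bo has no backing pages yet; callers are expected
 * to allocate them with hmm_bo_alloc_pages() and map them with
 * hmm_bo_bind().  Returns NULL if no large enough free range exists.
 */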
struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
				       unsigned int pgnr)
{
	struct hmm_buffer_object *bo, *new_bo;
	struct rb_root *root = &bdev->free_rbtree;

	check_bodev_null_return(bdev, NULL);
	var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
			 "hmm_bo_device not inited yet.\n");

	if (pgnr == 0) {
		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
		return NULL;
	}

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed",
			__func__);
		return NULL;
	}

	if (bo->pgnr > pgnr) {
		new_bo = __bo_break_up(bdev, bo, pgnr);
		if (!new_bo) {
			mutex_unlock(&bdev->rbtree_mutex);
			dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
				__func__);
			return NULL;
		}

		__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

		mutex_unlock(&bdev->rbtree_mutex);
		return new_bo;
	}

	__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);

	mutex_unlock(&bdev->rbtree_mutex);
	return bo;
}

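/*
 * Return the bo's address range to the free rbtree, merging it with free
 * neighbours in entire_bo_list when they are contiguous.  Any leftover
 * binding, backing pages or kernel mapping is torn down first.  Normally
 * reached via hmm_bo_unref() when the last reference is dropped.
 */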
void hmm_bo_release(struct hmm_buffer_object *bo)
{
	struct hmm_bo_device *bdev = bo->bdev;
	struct hmm_buffer_object *next_bo, *prev_bo;

	mutex_lock(&bdev->rbtree_mutex);

	/*
	 * FIXME:
	 *
	 * how do we destroy the bo when it is still MMAPED?
	 *
	 * ideally, this will not happen as hmm_bo_release
	 * will only be called when kref reaches 0, and in mmap
	 * operation the hmm_bo_ref will eventually be called.
	 * so, if this happens, something has gone wrong.
	 */
	if (bo->status & HMM_BO_MMAPED) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
		return;
	}

	if (bo->status & HMM_BO_BINDED) {
		dev_warn(atomisp_dev, "the bo is still bound, unbind it first...\n");
		hmm_bo_unbind(bo);
	}

	if (bo->status & HMM_BO_PAGE_ALLOCED) {
		dev_warn(atomisp_dev, "the pages are not freed, free pages first\n");
		hmm_bo_free_pages(bo);
	}
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
		hmm_bo_vunmap(bo);
	}

	rb_erase(&bo->node, &bdev->allocated_rbtree);

	prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
	next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);

	if (bo->list.prev != &bdev->entire_bo_list &&
	    prev_bo->end == bo->start &&
	    (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
		__bo_take_off_handling(prev_bo);
		bo = __bo_merge(prev_bo, bo);
	}

	if (bo->list.next != &bdev->entire_bo_list &&
	    next_bo->start == bo->end &&
	    (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
		__bo_take_off_handling(next_bo);
		bo = __bo_merge(bo, next_bo);
	}

	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

	mutex_unlock(&bdev->rbtree_mutex);
}

void hmm_bo_device_exit(struct hmm_bo_device *bdev)
{
	struct hmm_buffer_object *bo;
	unsigned long flags;

	dev_dbg(atomisp_dev, "%s: entering!\n", __func__);

	check_bodev_null_return_void(bdev);

	/*
	 * release all allocated bos, even those still in use;
	 * they will all be merged back into one big free bo
	 */
	while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
		hmm_bo_release(
		    rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));

	dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
		__func__);

	/* free all bos to release all ISP virtual memory */
	while (!list_empty(&bdev->entire_bo_list)) {
		bo = list_to_hmm_bo(bdev->entire_bo_list.next);

		spin_lock_irqsave(&bdev->list_lock, flags);
		list_del(&bo->list);
		spin_unlock_irqrestore(&bdev->list_lock, flags);

		kmem_cache_free(bdev->bo_cache, bo);
	}

	dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);

	kmem_cache_destroy(bdev->bo_cache);

	isp_mmu_exit(&bdev->mmu);
}

int hmm_bo_device_inited(struct hmm_bo_device *bdev)
{
	check_bodev_null_return(bdev, -EINVAL);

	return bdev->flag == HMM_BO_DEVICE_INITED;
}

int hmm_bo_allocated(struct hmm_buffer_object *bo)
{
	check_bo_null_return(bo, 0);

	return bo->status & HMM_BO_ALLOCED;
}

struct hmm_buffer_object *hmm_bo_device_search_start(
    struct hmm_bo_device *bdev, ia_css_ptr vaddr)
{
	struct hmm_buffer_object *bo;

	check_bodev_null_return(bdev, NULL);

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s cannot find bo with addr: 0x%x\n",
			__func__, vaddr);
		return NULL;
	}
	mutex_unlock(&bdev->rbtree_mutex);

	return bo;
}

struct hmm_buffer_object *hmm_bo_device_search_in_range(
    struct hmm_bo_device *bdev, unsigned int vaddr)
{
	struct hmm_buffer_object *bo;

	check_bodev_null_return(bdev, NULL);

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s cannot find bo containing addr: 0x%x\n",
			__func__, vaddr);
		return NULL;
	}
	mutex_unlock(&bdev->rbtree_mutex);

	return bo;
}

struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
    struct hmm_bo_device *bdev, const void *vaddr)
{
	struct list_head *pos;
	struct hmm_buffer_object *bo;
	unsigned long flags;

	check_bodev_null_return(bdev, NULL);

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_for_each(pos, &bdev->entire_bo_list) {
		bo = list_to_hmm_bo(pos);
		/* skip bo which has no vm_node allocated */
		if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
			continue;
		if (bo->vmap_addr == vaddr)
			goto found;
	}
	spin_unlock_irqrestore(&bdev->list_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&bdev->list_lock, flags);
	return bo;
}

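/*
 * Helpers for HMM_BO_PRIVATE buffer objects: pages are allocated with
 * alloc_pages_bulk_array() and marked uncacheable with
 * set_pages_array_uc(), so they must be switched back to write-back
 * before they are handed back to the page allocator.
 */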
static void free_pages_bulk_array(unsigned long nr_pages, struct page **page_array)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++)
		__free_pages(page_array[i], 0);
}

static void free_private_bo_pages(struct hmm_buffer_object *bo)
{
	set_pages_array_wb(bo->pages, bo->pgnr);
	free_pages_bulk_array(bo->pgnr, bo->pages);
}

/* Allocate pages which will be used only by the ISP */
static int alloc_private_pages(struct hmm_buffer_object *bo)
{
	const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS;
	int ret;

	ret = alloc_pages_bulk_array(gfp, bo->pgnr, bo->pages);
	if (ret != bo->pgnr) {
		free_pages_bulk_array(ret, bo->pages);
		dev_err(atomisp_dev, "alloc_pages_bulk_array() failed\n");
		return -ENOMEM;
	}

	ret = set_pages_array_uc(bo->pages, bo->pgnr);
	if (ret) {
		dev_err(atomisp_dev, "set pages uncacheable failed.\n");
		free_pages_bulk_array(bo->pgnr, bo->pages);
		return ret;
	}

	return 0;
}

static int alloc_vmalloc_pages(struct hmm_buffer_object *bo, void *vmalloc_addr)
{
	void *vaddr = vmalloc_addr;
	int i;

	for (i = 0; i < bo->pgnr; i++) {
		bo->pages[i] = vmalloc_to_page(vaddr);
		if (!bo->pages[i]) {
			dev_err(atomisp_dev, "Error could not get page %d of vmalloc buf\n", i);
			return -ENOMEM;
		}
		vaddr += PAGE_SIZE;
	}

	return 0;
}

/*
 * allocate/free physical pages for the bo.
 *
 * type indicates where the pages come from. Currently two types of
 * backing memory are supported: HMM_BO_PRIVATE and HMM_BO_VMALLOC.
 *
 * vmalloc_addr is only valid when type is HMM_BO_VMALLOC.
 */
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
		       enum hmm_bo_type type,
		       void *vmalloc_addr)
{
	int ret = -EINVAL;

	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);
	check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	bo->pages = kcalloc(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(!bo->pages)) {
		ret = -ENOMEM;
		goto alloc_err;
	}

	if (type == HMM_BO_PRIVATE) {
		ret = alloc_private_pages(bo);
	} else if (type == HMM_BO_VMALLOC) {
		ret = alloc_vmalloc_pages(bo, vmalloc_addr);
	} else {
		dev_err(atomisp_dev, "invalid buffer type.\n");
		ret = -EINVAL;
	}
	if (ret)
		goto alloc_err;

	bo->type = type;

	bo->status |= HMM_BO_PAGE_ALLOCED;

	mutex_unlock(&bo->mutex);

	return 0;

alloc_err:
	kfree(bo->pages);
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev, "alloc pages err...\n");
	return ret;
status_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object already has pages allocated.\n");
	return -EINVAL;
}

/*
 * free physical pages of the bo.
 */
void hmm_bo_free_pages(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);

	/* clear the flag anyway. */
	bo->status &= (~HMM_BO_PAGE_ALLOCED);

	if (bo->type == HMM_BO_PRIVATE)
		free_private_bo_pages(bo);
	else if (bo->type == HMM_BO_VMALLOC)
		; /* No-op, nothing to do */
	else
		dev_err(atomisp_dev, "invalid buffer type.\n");

	kfree(bo->pages);
	mutex_unlock(&bo->mutex);

	return;

status_err2:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object has no pages allocated yet.\n");
}

int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
{
	check_bo_null_return(bo, 0);

	return bo->status & HMM_BO_PAGE_ALLOCED;
}

/*
 * bind the physical pages to a virtual address space.
 */
int hmm_bo_bind(struct hmm_buffer_object *bo)
{
	int ret;
	unsigned int virt;
	struct hmm_bo_device *bdev;
	unsigned int i;

	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo,
				 HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
				 status_err1);

	check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);

	bdev = bo->bdev;

	virt = bo->start;

	for (i = 0; i < bo->pgnr; i++) {
		ret =
		    isp_mmu_map(&bdev->mmu, virt,
				page_to_phys(bo->pages[i]), 1);
		if (ret)
			goto map_err;
		virt += (1 << PAGE_SHIFT);
	}

	/*
	 * flush TLB here.
	 *
	 * theoretically, we do not need to flush the TLB as we did not
	 * change any existing address mappings, but Silicon Hive's MMU has
	 * a quirk here: when fetching PTEs (page table entries) into the
	 * TLB it apparently prefetches additional, still INVALID PTEs for
	 * performance reasons. E.g. when we only set up one page mapping,
	 * i.e. update one PTE, the MMU fetches 4 PTEs at a time, so the
	 * additional 3 PTEs are invalid.
	 */
	if (bo->start != 0x0)
		isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
					(bo->pgnr << PAGE_SHIFT));

	bo->status |= HMM_BO_BINDED;

	mutex_unlock(&bo->mutex);

	return 0;

map_err:
	/* unbind the physical pages already mapped above */
	virt = bo->start;
	for ( ; i > 0; i--) {
		isp_mmu_unmap(&bdev->mmu, virt, 1);
		virt += pgnr_to_size(1);
	}

	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"setup MMU address mapping failed.\n");
	return ret;

status_err2:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev, "buffer object already bound.\n");
	return -EINVAL;
status_err1:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object vm_node or pages not allocated.\n");
	return -EINVAL;
}

/*
 * unbind the physical pages from the related virtual address space.
 */
void hmm_bo_unbind(struct hmm_buffer_object *bo)
{
	unsigned int virt;
	struct hmm_bo_device *bdev;
	unsigned int i;

	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo,
				 HMM_BO_PAGE_ALLOCED |
				 HMM_BO_ALLOCED |
				 HMM_BO_BINDED, status_err);

	bdev = bo->bdev;

	virt = bo->start;

	for (i = 0; i < bo->pgnr; i++) {
		isp_mmu_unmap(&bdev->mmu, virt, 1);
		virt += pgnr_to_size(1);
	}

	/*
	 * flush TLB as the address mapping has been removed and
	 * related TLBs should be invalidated.
	 */
	isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
				(bo->pgnr << PAGE_SHIFT));

	bo->status &= (~HMM_BO_BINDED);

	mutex_unlock(&bo->mutex);

	return;

status_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer vm or pages not allocated, or not bound yet.\n");
}

int hmm_bo_binded(struct hmm_buffer_object *bo)
{
	int ret;

	check_bo_null_return(bo, 0);

	mutex_lock(&bo->mutex);

	ret = bo->status & HMM_BO_BINDED;

	mutex_unlock(&bo->mutex);

	return ret;
}

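/*
 * Map the bo's pages into a contiguous kernel virtual address range.
 * @cached selects between a normal write-back mapping and an uncached
 * one; if the bo is already vmapped with a different cache attribute it
 * is vunmapped and remapped.  The mapping stays until hmm_bo_vunmap().
 */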
void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
{
	check_bo_null_return(bo, NULL);

	mutex_lock(&bo->mutex);
	if (((bo->status & HMM_BO_VMAPED) && !cached) ||
	    ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
		mutex_unlock(&bo->mutex);
		return bo->vmap_addr;
	}

	/* the cached status needs to be changed, so vunmap first */
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		vunmap(bo->vmap_addr);
		bo->vmap_addr = NULL;
		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
	}

	bo->vmap_addr = vmap(bo->pages, bo->pgnr, VM_MAP,
			     cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
	if (unlikely(!bo->vmap_addr)) {
		mutex_unlock(&bo->mutex);
		dev_err(atomisp_dev, "vmap failed...\n");
		return NULL;
	}
	bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);

	mutex_unlock(&bo->mutex);
	return bo->vmap_addr;
}

void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);
	if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
		mutex_unlock(&bo->mutex);
		return;
	}

	clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
	mutex_unlock(&bo->mutex);
}

void hmm_bo_vunmap(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		vunmap(bo->vmap_addr);
		bo->vmap_addr = NULL;
		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
	}

	mutex_unlock(&bo->mutex);
}

void hmm_bo_ref(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	kref_get(&bo->kref);
}

static void kref_hmm_bo_release(struct kref *kref)
{
	if (!kref)
		return;

	hmm_bo_release(kref_to_hmm_bo(kref));
}

void hmm_bo_unref(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	kref_put(&bo->kref, kref_hmm_bo_release);
}

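/*
 * vm_operations for user-space mappings created by hmm_bo_mmap(): every
 * vma referencing the bo takes a kref and bumps mmap_count, so the bo
 * cannot be released while it is still mapped into user space.
 */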
static void hmm_bo_vm_open(struct vm_area_struct *vma)
{
	struct hmm_buffer_object *bo =
	    (struct hmm_buffer_object *)vma->vm_private_data;

	check_bo_null_return_void(bo);

	hmm_bo_ref(bo);

	mutex_lock(&bo->mutex);

	bo->status |= HMM_BO_MMAPED;

	bo->mmap_count++;

	mutex_unlock(&bo->mutex);
}

static void hmm_bo_vm_close(struct vm_area_struct *vma)
{
	struct hmm_buffer_object *bo =
	    (struct hmm_buffer_object *)vma->vm_private_data;

	check_bo_null_return_void(bo);

	hmm_bo_unref(bo);

	mutex_lock(&bo->mutex);

	bo->mmap_count--;

	if (!bo->mmap_count) {
		bo->status &= (~HMM_BO_MMAPED);
		vma->vm_private_data = NULL;
	}

	mutex_unlock(&bo->mutex);
}

static const struct vm_operations_struct hmm_bo_vm_ops = {
	.open = hmm_bo_vm_open,
	.close = hmm_bo_vm_close,
};

/*
 * mmap the bo to user space.
 */
int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
{
	unsigned int start, end;
	unsigned int virt;
	unsigned int pgnr, i;
	unsigned int pfn;

	check_bo_null_return(bo, -EINVAL);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	pgnr = bo->pgnr;
	start = vma->vm_start;
	end = vma->vm_end;

	/*
	 * the vma's virtual address space size and the buffer object's
	 * size must be the same.
	 */
	if ((start + pgnr_to_size(pgnr)) != end) {
		dev_warn(atomisp_dev,
			 "vma's address space size not equal to buffer object's size");
		return -EINVAL;
	}

	virt = vma->vm_start;
	for (i = 0; i < pgnr; i++) {
		pfn = page_to_pfn(bo->pages[i]);
		if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
			dev_warn(atomisp_dev,
				 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
				 virt, pfn, 1);
			return -EINVAL;
		}
		virt += PAGE_SIZE;
	}

	vma->vm_private_data = bo;

	vma->vm_ops = &hmm_bo_vm_ops;
	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);

	/*
	 * call hmm_bo_vm_open explicitly.
	 */
	hmm_bo_vm_open(vma);

	return 0;

status_err:
	dev_err(atomisp_dev, "buffer page not allocated yet.\n");
	return -EINVAL;
}
