// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
/*
 * This file contains the entry functions for the memory management of the
 * ISP driver.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* for kmap_local_page() */
#include <linux/io.h>		/* for page_to_phys() */
#include <linux/sysfs.h>

#include <asm/cacheflush.h>

#include "hmm/hmm.h"
#include "hmm/hmm_bo.h"

#include "atomisp_internal.h"
#include "mmu/isp_mmu.h"
#include "mmu/sh_mmu_mrfld.h"

struct hmm_bo_device bo_device;
static ia_css_ptr dummy_ptr = mmgr_EXCEPTION;
static bool hmm_initialized;

/*
 * p: private
 * v: vmalloc
 */
static const char hmm_bo_type_string[] = "pv";

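/*
 * Helper for the sysfs attributes below: walks @bo_list and prints one
 * "<type> <pgnr>" line per buffer object, followed by per-type totals.
 * @active selects whether allocated (HMM_BO_ALLOCED) or free objects
 * are listed.
 */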
static ssize_t bo_show(struct device *dev, struct device_attribute *attr,
		       char *buf, struct list_head *bo_list, bool active)
{
	ssize_t ret = 0;
	struct hmm_buffer_object *bo;
	unsigned long flags;
	int i;
	long total[HMM_BO_LAST] = { 0 };
	long count[HMM_BO_LAST] = { 0 };
	int index1 = 0;
	int index2 = 0;

	ret = scnprintf(buf, PAGE_SIZE, "type pgnr\n");
	if (ret <= 0)
		return 0;

	index1 += ret;

	spin_lock_irqsave(&bo_device.list_lock, flags);
	list_for_each_entry(bo, bo_list, list) {
		if ((active && (bo->status & HMM_BO_ALLOCED)) ||
		    (!active && !(bo->status & HMM_BO_ALLOCED))) {
			ret = scnprintf(buf + index1, PAGE_SIZE - index1,
					"%c %d\n",
					hmm_bo_type_string[bo->type], bo->pgnr);

			total[bo->type] += bo->pgnr;
			count[bo->type]++;
			if (ret > 0)
				index1 += ret;
		}
	}
	spin_unlock_irqrestore(&bo_device.list_lock, flags);

	for (i = 0; i < HMM_BO_LAST; i++) {
		if (count[i]) {
			ret = scnprintf(buf + index1 + index2,
					PAGE_SIZE - index1 - index2,
					"%ld %c buffer objects: %ld KB\n",
					count[i], hmm_bo_type_string[i],
					total[i] * 4);
			if (ret > 0)
				index2 += ret;
		}
	}

	/* Add trailing zero, not included by scnprintf */
	return index1 + index2 + 1;
}

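/*
 * The attributes below are exposed read-only on the atomisp device. An
 * illustrative way to inspect them from user space (the exact sysfs path
 * depends on how the device is registered):
 *
 *	cat /sys/<atomisp-device-path>/active_bo
 *	cat /sys/<atomisp-device-path>/free_bo
 */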
static ssize_t active_bo_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true);
}

static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
}

static DEVICE_ATTR_RO(active_bo);
static DEVICE_ATTR_RO(free_bo);

static struct attribute *sysfs_attrs_ctrl[] = {
	&dev_attr_active_bo.attr,
	&dev_attr_free_bo.attr,
	NULL
};

static struct attribute_group atomisp_attribute_group[] = {
	{.attrs = sysfs_attrs_ctrl },
};

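/*
 * Initialize the ISP MMU-backed address space (ISP_VM_SIZE bytes starting
 * at ISP_VM_START), reserve the dummy allocation at address 0 and register
 * the sysfs attributes. Returns 0 on success or a negative error code.
 */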
int hmm_init(void)
{
	int ret;

	ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
				 ISP_VM_START, ISP_VM_SIZE);
	if (ret)
		dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");

	hmm_initialized = true;

	/*
	 * HMM uses 0 to denote an invalid ISP virtual address, but
	 * ISP_VM_START is also defined as 0. Allocate one piece of dummy
	 * memory up front, which takes the address 0, so that no later
	 * hmm_alloc() call can ever return 0.
	 */
	dummy_ptr = hmm_alloc(1);

	if (!ret) {
		ret = sysfs_create_group(&atomisp_dev->kobj,
					 atomisp_attribute_group);
		if (ret)
			dev_err(atomisp_dev,
				"%s Failed to create sysfs\n", __func__);
	}

	return ret;
}

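/*
 * Tear down in reverse order of hmm_init(): remove the sysfs group, free
 * the dummy allocation and release the buffer object device. Safe to call
 * when hmm_init() never ran (dummy_ptr still holds mmgr_EXCEPTION).
 */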
void hmm_cleanup(void)
{
	if (dummy_ptr == mmgr_EXCEPTION)
		return;
	sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);

	/* free dummy memory first */
	hmm_free(dummy_ptr);
	dummy_ptr = 0;

	hmm_bo_device_exit(&bo_device);
	hmm_initialized = false;
}

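/*
 * Common allocation path: reserve an ISP virtual address range, back it
 * with pages (private or caller-provided vmalloc memory, depending on
 * @type) and bind the pages into the ISP MMU. Returns the ISP virtual
 * address, or 0 on failure.
 */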
static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type,
			      void *vmalloc_addr)
{
	unsigned int pgnr;
	struct hmm_buffer_object *bo;
	int ret;

	/*
	 * Check that we are initialized. In an ideal world we wouldn't need
	 * this, but we can tackle it once the driver is a lot cleaner.
	 */
	if (!hmm_initialized)
		hmm_init();

	/* Get page number from size */
	pgnr = size_to_pgnr_ceil(bytes);

	/* Buffer object structure init */
	bo = hmm_bo_alloc(&bo_device, pgnr);
	if (!bo) {
		dev_err(atomisp_dev, "hmm_bo_alloc failed.\n");
		goto create_bo_err;
	}

	/* Allocate pages for memory */
	ret = hmm_bo_alloc_pages(bo, type, vmalloc_addr);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
		goto alloc_page_err;
	}

	/* Combine the virtual address and pages together */
	ret = hmm_bo_bind(bo);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
		goto bind_err;
	}

	dev_dbg(atomisp_dev, "pages: 0x%08x (%zu bytes), type: %d, vmalloc %p\n",
		bo->start, bytes, type, vmalloc_addr);

	return bo->start;

bind_err:
	hmm_bo_free_pages(bo);
alloc_page_err:
	hmm_bo_unref(bo);
create_bo_err:
	return 0;
}

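/*
 * A minimal usage sketch for the allocation API (illustrative only;
 * error handling in real callers is usually more involved):
 *
 *	ia_css_ptr addr = hmm_alloc(bytes);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	hmm_set(addr, 0, bytes);
 *	hmm_store(addr, host_buf, bytes);
 *	...
 *	hmm_free(addr);
 */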
ia_css_ptr hmm_alloc(size_t bytes)
{
	return __hmm_alloc(bytes, HMM_BO_PRIVATE, NULL);
}

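/*
 * Wrap an existing vmalloc()ed buffer in a buffer object so the ISP can
 * address it. The vmalloc allocation is expected to stay alive for the
 * lifetime of the mapping.
 */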
ia_css_ptr hmm_create_from_vmalloc_buf(size_t bytes, void *vmalloc_addr)
{
	return __hmm_alloc(bytes, HMM_BO_VMALLOC, vmalloc_addr);
}

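/*
 * Free an allocation made by __hmm_alloc(): unbind it from the ISP MMU,
 * release its pages and drop the buffer object reference.
 */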
void hmm_free(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	dev_dbg(atomisp_dev, "%s: free 0x%08x\n", __func__, virt);

	if (WARN_ON(virt == mmgr_EXCEPTION))
		return;

	bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object starting at address 0x%x\n",
			(unsigned int)virt);
		return;
	}

	hmm_bo_unbind(bo);
	hmm_bo_free_pages(bo);
	hmm_bo_unref(bo);
}

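/*
 * Sanity-check a lookup result: the buffer object must exist, have pages
 * and have ISP virtual address space allocated before it can be accessed.
 */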
static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
{
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			ptr);
		return -EINVAL;
	}

	if (!hmm_bo_page_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no pages allocated.\n");
		return -EINVAL;
	}

	if (!hmm_bo_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no virtual address space allocated.\n");
		return -EINVAL;
	}

	return 0;
}

/* Read function in ISP memory management, copying page by page via kmap */
static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
				  unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	des = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		src = (char *)kmap_local_page(bo->pages[idx]) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;	/* update virt for the next iteration */

		if (des) {
			memcpy(des, src, len);
			des += len;
		}

		clflush_cache_range(src, len);

		kunmap_local(src);
	}

	return 0;
}

/* Read function in ISP memory management, using vmap when possible */
static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *src = bo->vmap_addr;

		src += (virt - bo->start);
		memcpy(data, src, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(src, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (!vptr)
			return load_and_flush_by_kmap(virt, data, bytes);

		vptr = vptr + (virt - bo->start);
		memcpy(data, vptr, bytes);
		clflush_cache_range(vptr, bytes);
		hmm_bo_vunmap(bo);
	}

	return 0;
}

/* Read function in ISP memory management */
int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
{
	if (!virt) {
		dev_warn(atomisp_dev,
			 "%s: address is NULL\n", __func__);
		return -EINVAL;
	}
	if (!data) {
		dev_err(atomisp_dev,
			"%s: data is a NULL argument\n", __func__);
		return -EINVAL;
	}
	return load_and_flush(virt, data, bytes);
}

/* Flush hmm data from the data cache */
int hmm_flush(ia_css_ptr virt, unsigned int bytes)
{
	return load_and_flush(virt, NULL, bytes);
}

/* Write function in ISP memory management */
int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	if (!virt) {
		dev_warn(atomisp_dev,
			 "%s: address is NULL\n", __func__);
		return -EINVAL;
	}
	if (!data) {
		dev_err(atomisp_dev,
			"%s: data is a NULL argument\n", __func__);
		return -EINVAL;
	}

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memcpy(dst, data, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (vptr) {
			vptr = vptr + (virt - bo->start);

			memcpy(vptr, data, bytes);
			clflush_cache_range(vptr, bytes);
			hmm_bo_vunmap(bo);
			return 0;
		}
	}

	/* vmap failed, fall back to copying page by page via kmap */
	src = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		/* kmap_local_page() cannot fail, no need to check the result */
		des = (char *)kmap_local_page(bo->pages[idx]) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memcpy(des, src, len);

		src += len;

		clflush_cache_range(des, len);

		kunmap_local(des);
	}

	return 0;
}

/* memset function in ISP memory management */
int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memset(dst, c, bytes);

		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (vptr) {
			vptr = vptr + (virt - bo->start);
			memset(vptr, c, bytes);
			clflush_cache_range(vptr, bytes);
			hmm_bo_vunmap(bo);
			return 0;
		}
	}

	/* vmap failed, fall back to setting page by page via kmap */
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		des = (char *)kmap_local_page(bo->pages[idx]) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memset(des, c, len);

		clflush_cache_range(des, len);

		kunmap_local(des);
	}

	return 0;
}

/* Virtual address to physical address conversion */
phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
{
	unsigned int idx, offset;
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			virt);
		return -1;
	}

	idx = (virt - bo->start) >> PAGE_SHIFT;
	offset = (virt - bo->start) - (idx << PAGE_SHIFT);

	return page_to_phys(bo->pages[idx]) + offset;
}

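/*
 * Map the buffer object starting at @virt into a user-space VMA
 * (typically backing an mmap() call on the device node).
 */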
int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_start(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object starting at address 0x%x\n",
			virt);
		return -EINVAL;
	}

	return hmm_bo_mmap(vma, bo);
}

/* Map an ISP virtual address into an IA (kernel) virtual address */
void *hmm_vmap(ia_css_ptr virt, bool cached)
{
	struct hmm_buffer_object *bo;
	void *ptr;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			virt);
		return NULL;
	}

	ptr = hmm_bo_vmap(bo, cached);
	if (!ptr)
		return NULL;

	return ptr + (virt - bo->start);
}

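/*
 * A typical (illustrative) CPU-access pattern built on the helpers below,
 * using a cached mapping that must be flushed before the ISP reads it:
 *
 *	void *va = hmm_vmap(isp_addr, true);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, data, bytes);
 *	hmm_flush_vmap(isp_addr);
 *	hmm_vunmap(isp_addr);
 */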
/* Flush memory which was mapped as cached memory through hmm_vmap() */
void hmm_flush_vmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "cannot find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_flush_vmap(bo);
}

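/*
 * Drop the kernel virtual mapping created by hmm_vmap(). The ISP-side
 * mapping itself stays intact.
 */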
void hmm_vunmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "cannot find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_vunmap(bo);
}