// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 */
/*
 * ISP MMU management wrap code
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/mm.h>		/* for GFP_ATOMIC */
#include <linux/slab.h>		/* for kmalloc */
#include <linux/list.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/sizes.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "atomisp_internal.h"
#include "mmu/isp_mmu.h"

/*
 * 64-bit x86 processor physical address layout:
 * 0		- 0x7fffffff		DDR RAM	(2GB)
 * 0x80000000	- 0xffffffff		MMIO	(2GB)
 * 0x100000000	- 0x3fffffffffff	DDR RAM	(64TB)
 * So if the system has more than 2GB of DDR memory, the lower 2GB occupies
 * physical addresses 0 - 0x7fffffff and the rest starts at 0x100000000.
 * We have to make sure memory is allocated from the lower 2GB for devices
 * that are only 32-bit capable (e.g. the ISP MMU).
 *
 * For any confusion, contact bin.gao@intel.com.
 */
#define NR_PAGES_2GB	(SZ_2G / PAGE_SIZE)

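/*
 * Note: alloc_page_table() below requests its pages with GFP_DMA32, which
 * keeps the ISP page tables themselves within the low, 32-bit addressable
 * part of RAM described above.
 */
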
static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
			 unsigned int end_isp_virt);

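/*
 * Page tables are allocated from lowmem (see alloc_page_table()), so a
 * table's physical address can be converted back to a kernel virtual
 * address with phys_to_virt() and its 32-bit entries accessed directly.
 */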
static unsigned int atomisp_get_pte(phys_addr_t pt, unsigned int idx)
{
	unsigned int *pt_virt = phys_to_virt(pt);

	return *(pt_virt + idx);
}

static void atomisp_set_pte(phys_addr_t pt,
			    unsigned int idx, unsigned int pte)
{
	unsigned int *pt_virt = phys_to_virt(pt);

	*(pt_virt + idx) = pte;
}

static void *isp_pt_phys_to_virt(phys_addr_t phys)
{
	return phys_to_virt(phys);
}

static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu,
				     unsigned int pte)
{
	return mmu->driver->pte_to_phys(mmu, pte);
}

static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,
	phys_addr_t phys)
{
	unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);

	return (unsigned int)(pte | ISP_PTE_VALID_MASK(mmu));
}

/*
 * Allocate an uncacheable page table and return its physical address.
 */
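/*
 * The freshly allocated table is switched to uncached on x86 (see
 * set_memory_uc() below), presumably so that PTE updates made by the CPU
 * are visible to the ISP MMU's hardware page-table walker without any
 * explicit cache maintenance.
 */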
static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
{
	int i;
	phys_addr_t page;
	void *virt;

	virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);

	if (!virt)
		return (phys_addr_t)NULL_PAGE;

	/*
	 * we need an uncacheable page table.
	 */
#ifdef	CONFIG_X86
	set_memory_uc((unsigned long)virt, 1);
#endif

	page = virt_to_phys(virt);

	for (i = 0; i < 1024; i++) {
		/* NEED CHECK */
		atomisp_set_pte(page, i, mmu->driver->null_pte);
	}

	return page;
}

static void free_page_table(struct isp_mmu *mmu, phys_addr_t page)
{
	void *virt;

	page &= ISP_PAGE_MASK;
	/*
	 * reset the page attributes to write-back before freeing
	 */
	virt = phys_to_virt(page);

#ifdef	CONFIG_X86
	set_memory_wb((unsigned long)virt, 1);
#endif

	free_page((unsigned long)virt);
}

static void mmu_remap_error(struct isp_mmu *mmu,
			    phys_addr_t l1_pt, unsigned int l1_idx,
			    phys_addr_t l2_pt, unsigned int l2_idx,
			    unsigned int isp_virt, phys_addr_t old_phys,
			    phys_addr_t new_phys)
{
	dev_err(atomisp_dev, "address remap:\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\told: isp_virt = 0x%x, phys = 0x%llx\n"
		"\tnew: isp_virt = 0x%x, phys = 0x%llx\n",
		isp_pt_phys_to_virt(l1_pt),
		(u64)l1_pt, l1_idx,
		isp_pt_phys_to_virt(l2_pt),
		(u64)l2_pt, l2_idx, isp_virt,
		(u64)old_phys, isp_virt,
		(u64)new_phys);
}

static void mmu_unmap_l2_pte_error(struct isp_mmu *mmu,
				   phys_addr_t l1_pt, unsigned int l1_idx,
				   phys_addr_t l2_pt, unsigned int l2_idx,
				   unsigned int isp_virt, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L2 pte:\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tisp_virt = 0x%x, pte(page phys) = 0x%x\n",
		isp_pt_phys_to_virt(l1_pt),
		(u64)l1_pt, l1_idx,
		isp_pt_phys_to_virt(l2_pt),
		(u64)l2_pt, l2_idx, isp_virt,
		pte);
}

static void mmu_unmap_l1_pte_error(struct isp_mmu *mmu,
				   phys_addr_t l1_pt, unsigned int l1_idx,
				   unsigned int isp_virt, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L1 pte (L2 PT):\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tisp_virt = 0x%x, l1_pte(L2 PT) = 0x%x\n",
		isp_pt_phys_to_virt(l1_pt),
		(u64)l1_pt, l1_idx, (unsigned int)isp_virt,
		pte);
}

static void mmu_unmap_l1_pt_error(struct isp_mmu *mmu, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L1PT:\n\n"
		"L1PT = 0x%x\n", (unsigned int)pte);
}

/*
 * Update the L2 page table for the given range of ISP virtual addresses and
 * the corresponding page physical addresses.
 */
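/*
 * A minimal worked example, assuming the usual two-level layout of 4 KiB
 * pages and 1024-entry L1/L2 tables (10/10/12 bit split): for the ISP
 * virtual address 0x00401000, ISP_PTR_TO_L1_IDX() yields 1 (bits 31..22)
 * and ISP_PTR_TO_L2_IDX() yields 1 (bits 21..12), so the page is mapped
 * through entry 1 of the L2 table referenced by entry 1 of the L1 table.
 */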
static int mmu_l2_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int l1_idx, phys_addr_t l2_pt,
		      unsigned int start, unsigned int end, phys_addr_t phys)
{
	unsigned int ptr;
	unsigned int idx;
	unsigned int pte;

	l2_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L2_IDX(ptr);

		pte = atomisp_get_pte(l2_pt, idx);

		if (ISP_PTE_VALID(mmu, pte)) {
			mmu_remap_error(mmu, l1_pt, l1_idx,
					l2_pt, idx, ptr, pte, phys);

			/* free all mapped pages */
			free_mmu_map(mmu, start, ptr);

			return -EINVAL;
		}

		pte = isp_pgaddr_to_pte_valid(mmu, phys);

		atomisp_set_pte(l2_pt, idx, pte);
		mmu->l2_pgt_refcount[l1_idx]++;
		ptr += (1U << ISP_L2PT_OFFSET);
		phys += (1U << ISP_L2PT_OFFSET);
	} while (ptr < end && idx < ISP_L2PT_PTES - 1);

	return 0;
}

/*
 * Update the L1 page table for the given range of ISP virtual addresses and
 * the corresponding page physical addresses.
 */
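/*
 * The range is walked one L1 slot at a time: an L2 table is allocated on
 * demand for each slot that has no valid L1 entry yet, and mmu_l2_map()
 * then fills at most one L1 stride's worth of entries before moving on.
 */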
static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int start, unsigned int end,
		      phys_addr_t phys)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx;
	unsigned int l2_pte;
	int ret;

	l1_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);

		l2_pte = atomisp_get_pte(l1_pt, idx);

		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			l2_pt = alloc_page_table(mmu);
			if (l2_pt == NULL_PAGE) {
				dev_err(atomisp_dev,
					"failed to allocate page table\n");

				/* free all mapped pages */
				free_mmu_map(mmu, start, ptr);

				return -ENOMEM;
			}

			l2_pte = isp_pgaddr_to_pte_valid(mmu, l2_pt);

			atomisp_set_pte(l1_pt, idx, l2_pte);
			mmu->l2_pgt_refcount[idx] = 0;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

		if (l1_aligned < end) {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					 l2_pt, ptr, l1_aligned, phys);
			phys += (l1_aligned - ptr);
			ptr = l1_aligned;
		} else {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					 l2_pt, ptr, end, phys);
			phys += (end - ptr);
			ptr = end;
		}

		if (ret) {
			dev_err(atomisp_dev, "failed to set up mapping in L2PT\n");

			/* free all mapped pages */
			free_mmu_map(mmu, start, ptr);

			return -EINVAL;
		}
	} while (ptr < end && idx < ISP_L1PT_PTES);

	return 0;
}

/*
 * Update the page tables for the given range of ISP virtual addresses and
 * the corresponding page physical addresses.
 */
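/*
 * The L1 page table is created lazily: the first successful mmu_map() call
 * allocates it, records its physical address in mmu->base_address and
 * resets the per-slot L2 refcounts.
 */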
static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		   phys_addr_t phys, unsigned int pgnr)
{
	unsigned int start, end;
	phys_addr_t l1_pt;
	int ret;

	mutex_lock(&mmu->pt_mutex);
	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		/*
		 * allocate 1 new page for L1 page table
		 */
		l1_pt = alloc_page_table(mmu);
		if (l1_pt == NULL_PAGE) {
			dev_err(atomisp_dev, "failed to allocate page table\n");
			mutex_unlock(&mmu->pt_mutex);
			return -ENOMEM;
		}

		/*
		 * set up the L1 page table physical address for the MMU
		 */
		mmu->base_address = l1_pt;
		mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt);
		memset(mmu->l2_pgt_refcount, 0, sizeof(int) * ISP_L1PT_PTES);
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	start = (isp_virt) & ISP_PAGE_MASK;
	end = start + (pgnr << ISP_PAGE_OFFSET);
	phys &= ISP_PAGE_MASK;

	ret = mmu_l1_map(mmu, l1_pt, start, end, phys);

	if (ret)
		dev_err(atomisp_dev, "failed to set up mapping in L1PT\n");

	mutex_unlock(&mmu->pt_mutex);
	return ret;
}

/*
 * Tear down L2 page table entries for the given range of ISP virtual
 * addresses.
 */
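/*
 * mmu->l2_pgt_refcount[] counts the valid entries of each L2 page table;
 * once a table's count drops to zero here, the table itself is freed and
 * its L1 entry is cleared.
 */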
static void mmu_l2_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
			 unsigned int l1_idx, phys_addr_t l2_pt,
			 unsigned int start, unsigned int end)
{
	unsigned int ptr;
	unsigned int idx;
	unsigned int pte;

	l2_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);

	ptr = start;
	do {
		idx = ISP_PTR_TO_L2_IDX(ptr);

		pte = atomisp_get_pte(l2_pt, idx);

		if (!ISP_PTE_VALID(mmu, pte))
			mmu_unmap_l2_pte_error(mmu, l1_pt, l1_idx,
					       l2_pt, idx, ptr, pte);

		atomisp_set_pte(l2_pt, idx, mmu->driver->null_pte);
		mmu->l2_pgt_refcount[l1_idx]--;
		ptr += (1U << ISP_L2PT_OFFSET);
	} while (ptr < end && idx < ISP_L2PT_PTES - 1);

	if (mmu->l2_pgt_refcount[l1_idx] == 0) {
		free_page_table(mmu, l2_pt);
		atomisp_set_pte(l1_pt, l1_idx, mmu->driver->null_pte);
	}
}

/*
 * Tear down L1 page table entries (and their L2 page tables) for the given
 * range of ISP virtual addresses.
 */
static void mmu_l1_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
			 unsigned int start, unsigned int end)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx;
	unsigned int l2_pte;

	l1_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);

		l2_pte = atomisp_get_pte(l1_pt, idx);

		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			mmu_unmap_l1_pte_error(mmu, l1_pt, idx, ptr, l2_pte);
			/* nothing is mapped under this L1 entry, skip past it */
			ptr += (1U << ISP_L1PT_OFFSET);
			continue;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

		if (l1_aligned < end) {
			mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, l1_aligned);
			ptr = l1_aligned;
		} else {
			mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, end);
			ptr = end;
		}
		/*
		 * use the same L2 page next time, so we don't
		 * need to invalidate and free this PT.
		 */
		/*      atomisp_set_pte(l1_pt, idx, NULL_PTE); */
	} while (ptr < end && idx < ISP_L1PT_PTES);
}

/*
 * Tear down page table entries for the given range of ISP virtual addresses.
 */
static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		      unsigned int pgnr)
{
	unsigned int start, end;
	phys_addr_t l1_pt;

	mutex_lock(&mmu->pt_mutex);
	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		mmu_unmap_l1_pt_error(mmu, mmu->l1_pte);
		mutex_unlock(&mmu->pt_mutex);
		return;
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	start = (isp_virt) & ISP_PAGE_MASK;
	end = start + (pgnr << ISP_PAGE_OFFSET);

	mmu_l1_unmap(mmu, l1_pt, start, end);
	mutex_unlock(&mmu->pt_mutex);
}

/*
 * Tear down all mappings between the given ISP start and end virtual
 * addresses.
 */
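/* Also used by mmu_l1_map()/mmu_l2_map() to roll back partial mappings. */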
static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
			 unsigned int end_isp_virt)
{
	unsigned int pgnr;
	unsigned int start, end;

	start = (start_isp_virt) & ISP_PAGE_MASK;
	end = (end_isp_virt) & ISP_PAGE_MASK;
	pgnr = (end - start) >> ISP_PAGE_OFFSET;
	mmu_unmap(mmu, start, pgnr);
}

int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		phys_addr_t phys, unsigned int pgnr)
{
	return mmu_map(mmu, isp_virt, phys, pgnr);
}

void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		   unsigned int pgnr)
{
	mmu_unmap(mmu, isp_virt, pgnr);
}

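/*
 * Default ->tlb_flush_range implementation for clients that only provide
 * tlb_flush_all: simply flush the whole ISP TLB.
 */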
static void isp_mmu_flush_tlb_range_default(struct isp_mmu *mmu,
	unsigned int start,
	unsigned int size)
{
	isp_mmu_flush_tlb(mmu);
}

/* MMU init for internal structure */
int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver)
{
	if (!mmu)		/* error */
		return -EINVAL;
	if (!driver)		/* error */
		return -EINVAL;

	if (!driver->name)
		dev_warn(atomisp_dev, "NULL name for MMU driver...\n");

	mmu->driver = driver;

	if (!driver->tlb_flush_all) {
		dev_err(atomisp_dev, "tlb_flush_all operation not provided.\n");
		return -EINVAL;
	}

	if (!driver->tlb_flush_range)
		driver->tlb_flush_range = isp_mmu_flush_tlb_range_default;

	if (!driver->pte_valid_mask) {
		dev_err(atomisp_dev, "pte_valid_mask is missing from mmu driver\n");
		return -EINVAL;
	}

	mmu->l1_pte = driver->null_pte;

	mutex_init(&mmu->pt_mutex);

	return 0;
}
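
/*
 * Illustrative client sketch (not part of this file; names such as
 * my_isp_mmu_client, my_pte_to_phys, my_phys_to_pte and my_tlb_flush_all
 * are hypothetical): a client fills in an isp_mmu_client with its PTE
 * conversion helpers and TLB callbacks, then initializes the wrapper and
 * maps pages for the ISP.
 *
 *	static struct isp_mmu_client my_isp_mmu_client = {
 *		.name		= "my_isp_mmu",
 *		.pte_valid_mask	= 0x1,
 *		.null_pte	= 0x0,
 *		.pte_to_phys	= my_pte_to_phys,
 *		.phys_to_pte	= my_phys_to_pte,
 *		.tlb_flush_all	= my_tlb_flush_all,
 *	};
 *
 *	ret = isp_mmu_init(&mmu, &my_isp_mmu_client);
 *	if (!ret)
 *		ret = isp_mmu_map(&mmu, isp_virt, page_phys, npages);
 */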

/* Free the L1 and all L2 page tables */
void isp_mmu_exit(struct isp_mmu *mmu)
{
	unsigned int idx;
	unsigned int pte;
	phys_addr_t l1_pt, l2_pt;

	if (!mmu)
		return;

	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		dev_warn(atomisp_dev, "invalid L1PT: pte = 0x%x\n",
			 (unsigned int)mmu->l1_pte);
		return;
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	for (idx = 0; idx < ISP_L1PT_PTES; idx++) {
		pte = atomisp_get_pte(l1_pt, idx);

		if (ISP_PTE_VALID(mmu, pte)) {
			l2_pt = isp_pte_to_pgaddr(mmu, pte);

			free_page_table(mmu, l2_pt);
		}
	}

	free_page_table(mmu, l1_pt);
}