// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */

#include <linux/dma-mapping.h>

#include "msm_drv.h"
#include "msm_mmu.h"

#include "adreno_gpu.h"
#include "a2xx_gpu.h"

#include "a2xx.xml.h"

struct a2xx_gpummu {
	struct msm_mmu base;
	struct msm_gpu *gpu;
	dma_addr_t pt_base;
	uint32_t *table;
};
#define to_a2xx_gpummu(x) container_of(x, struct a2xx_gpummu, base)

#define GPUMMU_VA_START SZ_16M
#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
#define GPUMMU_PAGE_SIZE SZ_4K
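/*
 * One 32-bit pagetable entry per 4K GPU page across the whole VA range:
 * sizeof(uint32_t) * (0xfff * SZ_64K) / SZ_4K bytes, i.e. just under 256K.
 */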
#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)

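/* The GPU MMU keeps no per-attach state, so detach is a no-op. */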
static void a2xx_gpummu_detach(struct msm_mmu *mmu)
{
}

static int a2xx_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
	struct sg_dma_page_iter dma_iter;
	unsigned prot_bits = 0;

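	/* Encode permissions in the PTE's low bits: bit 0 enables writes, bit 1 enables reads. */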
	if (prot & IOMMU_WRITE)
		prot_bits |= 1;
	if (prot & IOMMU_READ)
		prot_bits |= 2;

	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
		int i;

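		/*
		 * A CPU page may span several GPU pages when PAGE_SIZE is
		 * larger than GPUMMU_PAGE_SIZE; write one PTE per 4K GPU page.
		 */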
		for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE)
			gpummu->table[idx++] = (addr + i) | prot_bits;
	}

	/* This could be improved by deferring the flush across multiple map() calls: */
	gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
	return 0;
}

static int a2xx_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
	struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
	unsigned i;

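	/* Clear one PTE per 4K page in the range, then flush the TLB. */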
	for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
		gpummu->table[idx] = 0;

	gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
	return 0;
}

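/* Stall-on-fault resume is not supported here, so this is a no-op. */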
static void a2xx_gpummu_resume_translation(struct msm_mmu *mmu)
{
}

static void a2xx_gpummu_destroy(struct msm_mmu *mmu)
{
	struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);

	/* The size must match dma_alloc_attrs(), which allocated TABLE_SIZE + 32. */
	dma_free_attrs(mmu->dev, TABLE_SIZE + 32, gpummu->table, gpummu->pt_base,
		DMA_ATTR_FORCE_CONTIGUOUS);

	kfree(gpummu);
}

static const struct msm_mmu_funcs funcs = {
	.detach = a2xx_gpummu_detach,
	.map = a2xx_gpummu_map,
	.unmap = a2xx_gpummu_unmap,
	.destroy = a2xx_gpummu_destroy,
	.resume_translation = a2xx_gpummu_resume_translation,
};

struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu)
{
	struct a2xx_gpummu *gpummu;

	gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
	if (!gpummu)
		return ERR_PTR(-ENOMEM);

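	/*
	 * Allocate 32 bytes past the end of the table; a2xx_gpummu_params()
	 * hands that slack out as the 32-byte-aligned translation-error
	 * address.
	 */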
	gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
		GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
	if (!gpummu->table) {
		kfree(gpummu);
		return ERR_PTR(-ENOMEM);
	}

	gpummu->gpu = gpu;
	msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU);

	return &gpummu->base;
}

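/*
 * Report the pagetable base and the translation-error address that get
 * programmed into the MH MMU registers.
 */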
void a2xx_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
		dma_addr_t *tran_error)
{
	dma_addr_t base = to_a2xx_gpummu(mmu)->pt_base;

	*pt_base = base;
	*tran_error = base + TABLE_SIZE; /* 32-byte aligned */
}