#include "drmP.h"
#include "nouveau_drv.h"

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
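/* The ctxdma entries written below always describe 4KiB GPU pages; on
 * configurations where the CPU's PAGE_SIZE is larger, each system page
 * is split into several NV_CTXDMA_PAGE_SIZE chunks when it is mapped.
 */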

#if 0
struct nouveau_sgdma_be {
	struct drm_ttm_backend backend;
	struct drm_device *dev;

	int         pages;
	int         pages_populated;
	dma_addr_t *pagelist;
	int         is_bound;

	unsigned int pte_start;
};

static int
nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be)
{
	return ((be->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
}

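/*
 * Populate the backend: DMA-map every backing page, splitting each
 * system page into 4KiB ctxdma-sized chunks.  Pages the caller left
 * NULL are backed by dummy_read_page instead.
 */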
static int
nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	int p, d, o;

	DRM_DEBUG("num_pages = %lu\n", num_pages);

	if (nvbe->pagelist)
		return -EINVAL;
	nvbe->pages    = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT;
	nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t),
				   DRM_MEM_PAGES);
	if (!nvbe->pagelist)
		return -ENOMEM;

	nvbe->pages_populated = d = 0;
	for (p = 0; p < num_pages; p++) {
		for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) {
			struct page *page = pages[p];
			if (!page)
				page = dummy_read_page;
#ifdef __linux__
			nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev,
							 page, o,
							 NV_CTXDMA_PAGE_SIZE,
							 PCI_DMA_BIDIRECTIONAL);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
			if (pci_dma_mapping_error(nvbe->dev->pdev, nvbe->pagelist[d])) {
#else
			if (pci_dma_mapping_error(nvbe->pagelist[d])) {
#endif
				be->func->clear(be);
				DRM_ERROR("pci_map_page failed\n");
				return -EINVAL;
			}
#endif
			nvbe->pages_populated = ++d;
		}
	}

	return 0;
}

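/*
 * Undo populate: unbind from the aperture if necessary, then unmap the
 * pages and release the DMA address list.
 */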
static void
nouveau_sgdma_clear(struct drm_ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
#ifdef __linux__
	int d;
#endif
	DRM_DEBUG("\n");

	if (nvbe && nvbe->pagelist) {
		if (nvbe->is_bound)
			be->func->unbind(be);
#ifdef __linux__
		for (d = 0; d < nvbe->pages_populated; d++) {
			pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d],
				       NV_CTXDMA_PAGE_SIZE,
				       PCI_DMA_BIDIRECTIONAL);
		}
#endif
		drm_free(nvbe->pagelist, nvbe->pages*sizeof(dma_addr_t),
			 DRM_MEM_PAGES);
		nvbe->pagelist = NULL; /* allow repopulation, avoid double free */
	}
}

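/*
 * Bind: write one PTE per 4KiB chunk into the ctxdma/page table.  A
 * sketch of the encodings used below, inferred from this file rather
 * than from hardware documentation:
 *
 *   <NV50: one 32-bit word per page, pteval | 3
 *          (the low bits presumably meaning valid + read/write)
 *   NV50:  two 32-bit words per page, word0 = pteval | 0x21, word1 = 0
 *
 * pte_start is offset by 2 on <NV50 to skip the two-word ctxdma header
 * written by nouveau_sgdma_init().
 */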
static int
nouveau_sgdma_bind(struct drm_ttm_backend *be, struct drm_bo_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	uint64_t offset = (mem->mm_node->start << PAGE_SHIFT);
	uint32_t i;

	DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", mem->mm_node->start,
		  (unsigned long long)offset,
		  (mem->flags & DRM_BO_FLAG_CACHED) != 0);

	if (offset & NV_CTXDMA_PAGE_MASK)
		return -EINVAL;
	nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT);
	if (dev_priv->card_type < NV_50)
		nvbe->pte_start += 2; /* skip ctxdma header */

	for (i = nvbe->pte_start; i < nvbe->pte_start + nvbe->pages; i++) {
		uint64_t pteval = nvbe->pagelist[i - nvbe->pte_start];

		if (pteval & NV_CTXDMA_PAGE_MASK) {
			DRM_ERROR("Bad pteval 0x%llx\n",
				(unsigned long long)pteval);
			return -EINVAL;
		}

		if (dev_priv->card_type < NV_50) {
			INSTANCE_WR(gpuobj, i, pteval | 3);
		} else {
			INSTANCE_WR(gpuobj, (i<<1)+0, pteval | 0x21);
			INSTANCE_WR(gpuobj, (i<<1)+1, 0x00000000);
		}
	}

	nvbe->is_bound = 1;
	return 0;
}

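/*
 * Unbind: rather than leaving stale translations behind, point every
 * PTE back at the DMA-mapped dummy page set up by nouveau_sgdma_init().
 */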
static int
nouveau_sgdma_unbind(struct drm_ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

	DRM_DEBUG("\n");

	if (nvbe->is_bound) {
		struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
		unsigned int pte;

		pte = nvbe->pte_start;
		while (pte < (nvbe->pte_start + nvbe->pages)) {
			uint64_t pteval = dev_priv->gart_info.sg_dummy_bus;

			if (dev_priv->card_type < NV_50) {
				INSTANCE_WR(gpuobj, pte, pteval | 3);
			} else {
				INSTANCE_WR(gpuobj, (pte<<1)+0, pteval | 0x21);
				INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000000);
			}

			pte++;
		}

		nvbe->is_bound = 0;
	}

	return 0;
}

static void
nouveau_sgdma_destroy(struct drm_ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	DRM_DEBUG("\n");
	if (nvbe) {
		if (nvbe->pagelist)
			be->func->clear(be);
		drm_ctl_free(nvbe, sizeof(*nvbe), DRM_MEM_TTM);
	}
}

static struct drm_ttm_backend_func nouveau_sgdma_backend = {
	.needs_ub_cache_adjust	= nouveau_sgdma_needs_ub_cache_adjust,
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nouveau_sgdma_bind,
	.unbind			= nouveau_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

struct drm_ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	if (!dev_priv->gart_info.sg_ctxdma)
		return NULL;

	nvbe = drm_ctl_calloc(1, sizeof(*nvbe), DRM_MEM_TTM);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	nvbe->backend.func	= &nouveau_sgdma_backend;

	return &nvbe->backend;
}
#endif

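/*
 * Build the ctxdma (<NV50) or page table (NV50) covering the whole
 * SGDMA aperture, with every entry initially pointing at a single
 * DMA-mapped dummy page.  Rough <NV50 object layout, as written below:
 *
 *   word 0:   NV_CLASS_DMA_IN_MEMORY | flags (PT present, target PCI)
 *   word 1:   limit (aper_size - 1)
 *   word 2..: one PTE per 4KiB page of the aperture
 */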
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type < NV_50) {
		aper_size = (64 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */
	} else {
		/* 1 entire VM page table */
		aper_size = (512 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
	}

	if ((ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
				      NVOBJ_FLAG_ALLOW_NO_REFS |
				      NVOBJ_FLAG_ZERO_ALLOC |
				      NVOBJ_FLAG_ZERO_FREE, &gpuobj))) {
		DRM_ERROR("Error creating sgdma object: %d\n", ret);
		return ret;
	}
#ifdef __linux__
	dev_priv->gart_info.sg_dummy_page =
		alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!dev_priv->gart_info.sg_dummy_page) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return -ENOMEM;
	}
	set_page_locked(dev_priv->gart_info.sg_dummy_page);
	dev_priv->gart_info.sg_dummy_bus =
		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
#endif
	if (dev_priv->card_type < NV_50) {
		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
		 * it's confirmed to work on c51.  Perhaps that means
		 * NV_DMA_TARGET_PCIE on those cards? */
		INSTANCE_WR(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				       (1 << 12) /* PT present */ |
				       (0 << 13) /* PT *not* linear */ |
				       (NV_DMA_ACCESS_RW  << 14) |
				       (NV_DMA_TARGET_PCI << 16));
		INSTANCE_WR(gpuobj, 1, aper_size - 1);
		for (i = 2; i < 2 + (aper_size >> NV_CTXDMA_PAGE_SHIFT); i++) {
			INSTANCE_WR(gpuobj, i,
				    dev_priv->gart_info.sg_dummy_bus | 3);
		}
	} else {
		for (i = 0; i < obj_size; i += 8) {
			INSTANCE_WR(gpuobj, (i + 0) / 4,
				    dev_priv->gart_info.sg_dummy_bus | 0x21);
			INSTANCE_WR(gpuobj, (i + 4) / 4, 0);
		}
	}

	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
	dev_priv->gart_info.aper_base = 0;
	dev_priv->gart_info.aper_size = aper_size;
	dev_priv->gart_info.sg_ctxdma = gpuobj;
	return 0;
}

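/*
 * Tear down SGDMA state: release the dummy page and its DMA mapping,
 * then destroy the ctxdma object.
 */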
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->gart_info.sg_dummy_page) {
#ifdef __linux__
		/* size must match the pci_map_page() in nouveau_sgdma_init() */
		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
#endif
		dev_priv->gart_info.sg_dummy_page = NULL;
		dev_priv->gart_info.sg_dummy_bus = 0;
	}

	nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
}

#if 0
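/*
 * Pre-TTM bring-up hack: allocate scatter/gather memory and bind it
 * into the SGDMA aperture by driving the backend above by hand.  Kept
 * under #if 0 along with the backend itself.
 */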
int
nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_ttm_backend *be;
	struct drm_scatter_gather sgreq;
	struct drm_mm_node mm_node;
	struct drm_bo_mem_reg mem;
	int ret;

	dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev);
	if (!dev_priv->gart_info.sg_be)
		return -ENOMEM;
	be = dev_priv->gart_info.sg_be;

	/* Hack the aperture size down to the amount of system memory
	 * we're going to bind into it.
	 */
	if (dev_priv->gart_info.aper_size > 32*1024*1024)
		dev_priv->gart_info.aper_size = 32*1024*1024;

	sgreq.size = dev_priv->gart_info.aper_size;
	if ((ret = drm_sg_alloc(dev, &sgreq))) {
		DRM_ERROR("drm_sg_alloc failed: %d\n", ret);
		return ret;
	}
	dev_priv->gart_info.sg_handle = sgreq.handle;

	if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist,
				      dev->bm.dummy_read_page))) {
		DRM_ERROR("failed populate: %d\n", ret);
		return ret;
	}

	mm_node.start = 0;
	mem.mm_node = &mm_node;
	mem.flags = 0; /* bind reads mem->flags; don't pass it uninitialised */

	if ((ret = be->func->bind(be, &mem))) {
		DRM_ERROR("failed bind: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev)
{
}
#endif

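/*
 * Translate an offset within the SGDMA aperture to the bus address of
 * the backing page by reading the PTE back out of the ctxdma object
 * (skipping the two-word header).  Not implemented for NV50.
 */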
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte;

	pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
	if (dev_priv->card_type < NV_50) {
		*page = INSTANCE_RD(gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
		return 0;
	}

	DRM_ERROR("Unimplemented on NV50\n");
	return -EINVAL;
}