ttm_tt.c revision 1.7
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/drm_cache.h>
#include <dev/pci/drm/drm_mem_util.h>
#include <dev/pci/drm/ttm/ttm_module.h>
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>
#include <dev/pci/drm/ttm/ttm_page_alloc.h>

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}
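
/**
 * Allocates a single array sized to hold the page pointers, CPU addresses
 * and DMA addresses of the ttm, and points cpu_address and dma_address at
 * their slices of that array.
 */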
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
					  sizeof(*ttm->ttm.pages) +
					  sizeof(*ttm->dma_address) +
					  sizeof(*ttm->cpu_address));
	ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
	ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
}
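
/**
 * Changes the caching attribute of a single page in the kernel linear map.
 * If the page is not currently in the default cached state it is first set
 * back to writeback to free its current memtype, then the new
 * write-combined or uncached attribute is applied. Highmem pages are
 * skipped; on non-x86 builds this is a no-op.
 */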
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct vm_page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct vm_page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct vm_page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}
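
/**
 * Maps the TTM_PL_FLAG_WC / TTM_PL_FLAG_UNCACHED placement flags to a
 * caching state and applies it to all pages of the ttm.
 */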
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
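
/**
 * Unbinds and unpopulates the ttm if necessary, detaches any non-persistent
 * swap storage and finally destroys the ttm through its backend hook.
 */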
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (unlikely(ttm == NULL))
		return;

	if (ttm->state == tt_bound) {
		ttm_tt_unbind(ttm);
	}

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		uao_detach(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}
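
/**
 * Initializes a ttm for a buffer of the given size and allocates its page
 * directory. Returns -ENOMEM if the page directory cannot be allocated.
 */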
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct vm_page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);
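
/**
 * Frees the page directory allocated by ttm_tt_init().
 */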
void ttm_tt_fini(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
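
/**
 * Initializes a DMA-aware ttm and allocates the combined page pointer /
 * CPU address / DMA address directory. Returns -ENOMEM on allocation
 * failure.
 */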
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct vm_page *dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);
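
/**
 * Frees the combined directory allocated by ttm_dma_tt_init().
 */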
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	ttm_dma->cpu_address = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
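
/**
 * If the ttm is bound, unbinds it through its backend hook and marks it
 * unbound.
 */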
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}
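
/**
 * Populates the ttm through the driver and binds it to the given memory
 * region through its backend hook, marking it bound on success.
 */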
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
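
/**
 * Copies the ttm's pages back in from its swap storage uvm object and
 * drops that storage unless it is persistent.
 */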
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct uvm_object *swap_storage;
	struct vm_page *from_page;
	struct vm_page *to_page;
	struct pglist plist;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	TAILQ_INIT(&plist);
	if (uvm_objwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT, &plist))
		goto out_err;

	from_page = TAILQ_FIRST(&plist);
	for (i = 0; i < ttm->num_pages; ++i) {
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		uvm_pagecopy(from_page, to_page);
		from_page = TAILQ_NEXT(from_page, pageq);
	}

	uvm_objunwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		uao_detach(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}
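
/**
 * Copies the ttm's pages out to swap storage (a freshly created anonymous
 * uvm object unless a persistent one is supplied), unpopulates the ttm and
 * marks it swapped.
 */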
int ttm_tt_swapout(struct ttm_tt *ttm, struct uvm_object *persistent_swap_storage)
{
	struct uvm_object *swap_storage;
	struct vm_page *from_page;
	struct vm_page *to_page;
	struct pglist plist;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = uao_create(ttm->num_pages << PAGE_SHIFT, 0);
#ifdef notyet
		if (unlikely(IS_ERR(swap_storage))) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
#endif
	} else
		swap_storage = persistent_swap_storage;

	TAILQ_INIT(&plist);
	if (uvm_objwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT, &plist))
		goto out_err;

	to_page = TAILQ_FIRST(&plist);
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		uvm_pagecopy(from_page, to_page);
#ifdef notyet
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
#endif
		to_page = TAILQ_NEXT(to_page, pageq);
	}

	uvm_objunwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		uao_detach(swap_storage);

	return ret;
}
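
/**
 * Removes any virtual mappings of the ttm's pages before the pages are
 * released. Scatter-gather ttms are skipped.
 */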
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	int i;
	struct vm_page *page;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (unlikely(page == NULL))
			continue;
		pmap_page_protect(page, PROT_NONE);
	}
}
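
/**
 * Clears the mappings of the ttm's pages and releases the pages through the
 * driver's unpopulate hook.
 */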
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}