/*
 * SPU local store allocation routines
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
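
/*
 * The LSCSA (local store context save area) holds the saved image of an
 * SPE's 256K local store together with the register state the context
 * switch code operates on; see struct spu_lscsa in <asm/spu_csa.h>.
 */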

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/mmu.h>

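/*
 * Standard path: back the whole LSCSA with vmalloc()ed 4K pages and
 * mark them PG_reserved so that the local store image can later be
 * mapped into user space.
 */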
static int spu_alloc_lscsa_std(struct spu_state *csa)
{
	struct spu_lscsa *lscsa;
	unsigned char *p;

	lscsa = vmalloc(sizeof(struct spu_lscsa));
	if (!lscsa)
		return -ENOMEM;
	memset(lscsa, 0, sizeof(struct spu_lscsa));
	csa->lscsa = lscsa;

	/* Set LS pages reserved to allow for user-space mapping. */
	for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		SetPageReserved(vmalloc_to_page(p));

	return 0;
}

static void spu_free_lscsa_std(struct spu_state *csa)
{
	/* Clear reserved bit before vfree. */
	unsigned char *p;

	if (csa->lscsa == NULL)
		return;

	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		ClearPageReserved(vmalloc_to_page(p));

	vfree(csa->lscsa);
}

#ifdef CONFIG_SPU_FS_64K_LS

#define SPU_64K_PAGE_SHIFT	16
#define SPU_64K_PAGE_ORDER	(SPU_64K_PAGE_SHIFT - PAGE_SHIFT)
#define SPU_64K_PAGE_COUNT	(1ul << SPU_64K_PAGE_ORDER)
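/*
 * With the usual 4K base page size (PAGE_SHIFT == 12) this works out to
 * SPU_64K_PAGE_ORDER = 16 - 12 = 4, i.e. each 64K chunk covers
 * SPU_64K_PAGE_COUNT = 1 << 4 = 16 base pages.
 */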

int spu_alloc_lscsa(struct spu_state *csa)
{
	struct page	**pgarray;
	unsigned char	*p;
	int		i, j, n_4k;

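	/*
	 * mmu_psize_defs[] records which page sizes the MMU supports; a
	 * shift of zero for MMU_PAGE_64K means 64K pages are unavailable
	 * and we must take the vmalloc fallback below.
	 */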
	/* Check availability of 64K pages */
	if (mmu_psize_defs[MMU_PAGE_64K].shift == 0)
		goto fail;

	csa->use_big_pages = 1;

	pr_debug("spu_alloc_lscsa(csa=0x%p), trying to allocate 64K pages\n",
		 csa);

	/* First try to allocate our 64K pages. We need 5 of them
	 * with the current implementation. In the future, we should try
	 * to separate the lscsa from the actual local store image, thus
	 * allowing us to require only 4 64K pages per context.
	 */
	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) {
		csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL,
						  SPU_64K_PAGE_ORDER);
		if (csa->lscsa_pages[i] == NULL)
			goto fail;
	}

	pr_debug(" success ! creating vmap...\n");

	/* Now we need to create a vmalloc mapping of these for the kernel
	 * and SPU context switch code to use. For now we stick to a
	 * normal kernel vmalloc mapping, which in our case uses 4K pages.
	 */
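	/*
	 * n_4k is the number of base pages backing the big pages:
	 * SPU_64K_PAGE_COUNT * SPU_LSCSA_NUM_BIG_PAGES, i.e. 16 * 5 = 80
	 * struct page pointers on a 4K base page kernel.
	 */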
	n_4k = SPU_64K_PAGE_COUNT * SPU_LSCSA_NUM_BIG_PAGES;
	pgarray = kmalloc(sizeof(struct page *) * n_4k, GFP_KERNEL);
	if (pgarray == NULL)
		goto fail;
	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
		for (j = 0; j < SPU_64K_PAGE_COUNT; j++)
			/* We assume all the struct pages are contiguous,
			 * which should hopefully be the case for an
			 * order-4 allocation.
			 */
			pgarray[i * SPU_64K_PAGE_COUNT + j] =
				csa->lscsa_pages[i] + j;
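
	/*
	 * VM_USERMAP marks the mapping as eligible for
	 * remap_vmalloc_range(), which is what allows the saved local
	 * store to be mapped into user space later on.
	 */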
	csa->lscsa = vmap(pgarray, n_4k, VM_USERMAP, PAGE_KERNEL);
	kfree(pgarray);
	if (csa->lscsa == NULL)
		goto fail;

	memset(csa->lscsa, 0, sizeof(struct spu_lscsa));

	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		SetPageReserved(vmalloc_to_page(p));

	pr_debug(" all good !\n");

	return 0;
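
	/*
	 * Order-4 GFP_KERNEL allocations can fail when memory is
	 * fragmented; if anything above went wrong, release whatever we
	 * got and fall back to the plain vmalloc() scheme.
	 */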
fail:
	pr_debug("spufs: failed to allocate lscsa 64K pages, falling back\n");
	spu_free_lscsa(csa);
	return spu_alloc_lscsa_std(csa);
}

void spu_free_lscsa(struct spu_state *csa)
{
	unsigned char *p;
	int i;

	if (!csa->use_big_pages) {
		spu_free_lscsa_std(csa);
		return;
	}
	csa->use_big_pages = 0;

	if (csa->lscsa == NULL)
		goto free_pages;

	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		ClearPageReserved(vmalloc_to_page(p));

	vunmap(csa->lscsa);
	csa->lscsa = NULL;

 free_pages:

	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
		if (csa->lscsa_pages[i])
			__free_pages(csa->lscsa_pages[i], SPU_64K_PAGE_ORDER);
}

#else /* CONFIG_SPU_FS_64K_LS */

int spu_alloc_lscsa(struct spu_state *csa)
{
	return spu_alloc_lscsa_std(csa);
}

void spu_free_lscsa(struct spu_state *csa)
{
	spu_free_lscsa_std(csa);
}

#endif /* CONFIG_SPU_FS_64K_LS */