/*
 * SPU local store allocation routines
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/mmu.h>

#include "spufs.h"

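/*
 * Standard allocator: back the local store context save area (LSCSA)
 * with plain vzalloc() memory.  The local store pages are marked
 * reserved so they can later be mapped into user space.
 */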
static int spu_alloc_lscsa_std(struct spu_state *csa)
{
	struct spu_lscsa *lscsa;
	unsigned char *p;

	lscsa = vzalloc(sizeof(struct spu_lscsa));
	if (!lscsa)
		return -ENOMEM;
	csa->lscsa = lscsa;

	/* Set LS pages reserved to allow for user-space mapping. */
	for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		SetPageReserved(vmalloc_to_page(p));

	return 0;
}

static void spu_free_lscsa_std(struct spu_state *csa)
{
	/* Clear reserved bit before vfree. */
	unsigned char *p;

	if (csa->lscsa == NULL)
		return;

	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		ClearPageReserved(vmalloc_to_page(p));

	vfree(csa->lscsa);
}

#ifdef CONFIG_SPU_FS_64K_LS

#define SPU_64K_PAGE_SHIFT	16
#define SPU_64K_PAGE_ORDER	(SPU_64K_PAGE_SHIFT - PAGE_SHIFT)
#define SPU_64K_PAGE_COUNT	(1ul << SPU_64K_PAGE_ORDER)

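/*
 * 64K local store support: SPU_64K_PAGE_ORDER is the allocation order
 * of one 64K page in units of the kernel base page size, and
 * SPU_64K_PAGE_COUNT is the number of base pages covered by each 64K
 * allocation.  spu_alloc_lscsa() below tries to back the LSCSA with
 * SPU_LSCSA_NUM_BIG_PAGES such pages, falling back to the vzalloc()
 * path above if that fails.
 */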
int spu_alloc_lscsa(struct spu_state *csa)
{
	struct page	**pgarray;
	unsigned char	*p;
	int		i, j, n_4k;

	/* Check availability of 64K pages */
	if (!spu_64k_pages_available())
		goto fail;

	csa->use_big_pages = 1;

	pr_debug("spu_alloc_lscsa(csa=0x%p), trying to allocate 64K pages\n",
		 csa);

	/* First try to allocate our 64K pages. We need 5 of them
	 * with the current implementation. In the future, we should try
	 * to separate the lscsa from the actual local store image, thus
	 * allowing us to require only 4 64K pages per context.
	 */
	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) {
		/* XXX This is likely to fail, we should use a special pool
		 *     similar to what hugetlbfs does.
		 */
		csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL,
						  SPU_64K_PAGE_ORDER);
		if (csa->lscsa_pages[i] == NULL)
			goto fail;
	}

	pr_debug(" success! creating vmap...\n");

	/* Now we need to create a vmalloc mapping of these for the kernel
	 * and SPU context switch code to use. Currently, we stick to a
	 * normal kernel vmalloc mapping, which in our case will use 4K pages.
	 */
	n_4k = SPU_64K_PAGE_COUNT * SPU_LSCSA_NUM_BIG_PAGES;
	pgarray = kmalloc(sizeof(struct page *) * n_4k, GFP_KERNEL);
	if (pgarray == NULL)
		goto fail;
	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
		for (j = 0; j < SPU_64K_PAGE_COUNT; j++)
			/* We assume all the struct pages are contiguous,
			 * which should hopefully be the case for an order-4
			 * allocation.
			 */
			pgarray[i * SPU_64K_PAGE_COUNT + j] =
				csa->lscsa_pages[i] + j;
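	/* VM_USERMAP marks the resulting vmap area as suitable for later
	 * remapping into user space (see remap_vmalloc_range()).
	 */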
	csa->lscsa = vmap(pgarray, n_4k, VM_USERMAP, PAGE_KERNEL);
	kfree(pgarray);
	if (csa->lscsa == NULL)
		goto fail;

	memset(csa->lscsa, 0, sizeof(struct spu_lscsa));

	/* Set LS pages reserved to allow for user-space mapping.
	 *
	 * XXX isn't that a bit obsolete? I think we should just
	 * make sure the page count is high enough. Anyway, it won't
	 * harm for now.
	 */
	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		SetPageReserved(vmalloc_to_page(p));

	pr_debug(" all good!\n");

	return 0;
fail:
	pr_debug("spufs: failed to allocate lscsa 64K pages, falling back\n");
	spu_free_lscsa(csa);
	return spu_alloc_lscsa_std(csa);
}

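/*
 * Free an LSCSA allocated by the function above: clear the reserved
 * bits, tear down the vmap() mapping, then release the 64K pages.
 * Contexts that fell back to the standard allocator are handed to
 * spu_free_lscsa_std() instead.
 */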
void spu_free_lscsa(struct spu_state *csa)
{
	unsigned char *p;
	int i;

	if (!csa->use_big_pages) {
		spu_free_lscsa_std(csa);
		return;
	}
	csa->use_big_pages = 0;

	if (csa->lscsa == NULL)
		goto free_pages;

	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		ClearPageReserved(vmalloc_to_page(p));

	vunmap(csa->lscsa);
	csa->lscsa = NULL;

 free_pages:

	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
		if (csa->lscsa_pages[i])
			__free_pages(csa->lscsa_pages[i], SPU_64K_PAGE_ORDER);
}

#else /* CONFIG_SPU_FS_64K_LS */

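/*
 * Without CONFIG_SPU_FS_64K_LS the LSCSA is always vzalloc()-backed,
 * so these entry points are thin wrappers around the standard helpers.
 */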
int spu_alloc_lscsa(struct spu_state *csa)
{
	return spu_alloc_lscsa_std(csa);
}

void spu_free_lscsa(struct spu_state *csa)
{
	spu_free_lscsa_std(csa);
}

#endif /* !defined(CONFIG_SPU_FS_64K_LS) */