/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include "mlx4.h"

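/*
 * Object numbers are handed out round-robin: each search starts at
 * bitmap->last rather than at zero, and bitmap->top is rotated (within
 * bitmap->mask) whenever the search wraps, so a just-freed number is
 * unlikely to be handed straight back out.
 */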
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
	u32 obj;

	spin_lock(&bitmap->lock);

	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (obj < bitmap->max) {
		set_bit(obj, bitmap->table);
		bitmap->last = (obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		--bitmap->avail;

	spin_unlock(&bitmap->lock);

	return obj;
}

void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
{
	mlx4_bitmap_free_range(bitmap, obj, 1);
}

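/*
 * Allocate a contiguous range of cnt objects aligned to a multiple of
 * align.  cnt == 1 with no alignment constraint falls through to the
 * single-object fast path above.
 */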
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
{
	u32 obj;

	if (likely(cnt == 1 && align == 1))
		return mlx4_bitmap_alloc(bitmap);

	spin_lock(&bitmap->lock);

	obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
				bitmap->last, cnt, align - 1);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
						0, cnt, align - 1);
	}

	if (obj < bitmap->max) {
		bitmap_set(bitmap->table, obj, cnt);
		if (obj == bitmap->last) {
			bitmap->last = (obj + cnt);
			if (bitmap->last >= bitmap->max)
				bitmap->last = 0;
		}
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		bitmap->avail -= cnt;

	spin_unlock(&bitmap->lock);

	return obj;
}

u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
{
	return bitmap->avail;
}

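/*
 * Free cnt objects starting at obj.  The rotating top bits that were
 * OR'ed in at allocation time are masked off before the table is
 * touched.
 */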
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
{
	obj &= bitmap->max + bitmap->reserved_top - 1;

	spin_lock(&bitmap->lock);
	bitmap_clear(bitmap->table, obj, cnt);
	bitmap->last = min(bitmap->last, obj);
	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
			& bitmap->mask;
	bitmap->avail += cnt;
	spin_unlock(&bitmap->lock);
}

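/*
 * Set up a bitmap of num objects; num must be a power of two.  The
 * lowest reserved_bot and highest reserved_top numbers are never
 * handed out, and mask limits which bits the rotating top offset may
 * set in returned handles.
 */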
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
		     u32 reserved_bot, u32 reserved_top)
{
	/* num must be a power of 2 */
	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top  = 0;
	bitmap->max  = num - reserved_top;
	bitmap->mask = mask;
	bitmap->reserved_top = reserved_top;
	bitmap->avail = num - reserved_top - reserved_bot;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
				sizeof (long), GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	bitmap_set(bitmap->table, 0, reserved_bot);

	return 0;
}

void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
	kfree(bitmap->table);
}

/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */
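/*
 * A hypothetical caller (illustration only, not taken from this file)
 * setting up an 8 KB queue might do:
 *
 *	struct mlx4_buf buf;
 *	int err = mlx4_buf_alloc(dev, 8192, PAGE_SIZE, &buf);
 *	if (err)
 *		return err;
 *	...
 *	mlx4_buf_free(dev, 8192, &buf);
 *
 * With max_direct == PAGE_SIZE, the 8 KB request takes the page-list
 * path below; a larger max_direct would keep it as one coherent
 * allocation.
 */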
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf)
{
	dma_addr_t t;

	if (size <= max_direct) {
		buf->nbufs        = 1;
		buf->npages       = 1;
		buf->page_shift   = get_order(size) + PAGE_SHIFT;
		buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
						       size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		int i;

		buf->direct.buf  = NULL;
		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages      = buf->nbufs;
		buf->page_shift  = PAGE_SHIFT;
		buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					   GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;

			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}

		if (BITS_PER_LONG == 64) {
			struct page **pages;
			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	mlx4_buf_free(dev, size, buf);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);

void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
	int i;

	if (buf->nbufs == 1)
		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
				  buf->direct.map);
	else {
		if (BITS_PER_LONG == 64 && buf->direct.buf)
			vunmap(buf->direct.buf);

		for (i = 0; i < buf->nbufs; ++i)
			if (buf->page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);

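/*
 * Doorbell records are 4-byte entries carved out of shared
 * DMA-coherent pages.  Each page directory covers one page and tracks
 * free space with a small buddy scheme: order0 marks free single
 * entries and order1 marks free aligned pairs.  A fresh page starts
 * with every pair free and no stray singles (pgdir is zeroed, so
 * order0 begins empty).
 */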
static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
{
	struct mlx4_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
	if (!pgdir)
		return NULL;

	bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
					    &pgdir->db_dma, GFP_KERNEL);
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

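/*
 * Carve a doorbell of the requested order (0 = single entry, 1 = pair)
 * out of one page directory.  If only a larger block is free, it is
 * split and the unused buddy is returned to the lower-order bitmap.
 */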
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
				    struct mlx4_db *db, int order)
{
	int o;
	int i;

	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
		if (i < MLX4_DB_PER_PAGE >> o)
			goto found;
	}

	return -ENOMEM;

found:
	clear_bit(i, pgdir->bits[o]);

	i <<= o;

	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir = pgdir;
	db->index   = i;
	db->db      = pgdir->db_page + db->index;
	db->dma     = pgdir->db_dma  + db->index * 4;
	db->order   = order;

	return 0;
}

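/*
 * Allocate a doorbell record, reusing an existing page directory if
 * any has room and falling back to allocating a fresh page.
 */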
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&priv->pgdir_mutex);

	list_for_each_entry(pgdir, &priv->pgdir_list, list)
		if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
			goto out;

	pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &priv->pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));

out:
	mutex_unlock(&priv->pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_db_alloc);

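/*
 * Return a doorbell to its page directory, merging it with a free
 * buddy where possible.  Once every pair in the page is free again,
 * the page itself is released.
 */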
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int o;
	int i;

	mutex_lock(&priv->pgdir_mutex);

	o = db->order;
	i = db->index;

	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}
	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);

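/*
 * Bundle everything a hardware work queue needs: a doorbell record,
 * the queue buffer itself, and an MTT describing that buffer to the
 * HCA.  On failure the pieces are unwound in reverse order.
 */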
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct)
{
	int err;

	err = mlx4_db_alloc(dev, &wqres->db, 1);
	if (err)
		return err;

	*wqres->db.db = 0;

	err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
	if (err)
		goto err_db;

	err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
			    &wqres->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
	mlx4_buf_free(dev, size, &wqres->buf);
err_db:
	mlx4_db_free(dev, &wqres->db);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);

void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size)
{
	mlx4_mtt_cleanup(dev, &wqres->mtt);
	mlx4_buf_free(dev, size, &wqres->buf);
	mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);