/*
 *  PS3 address space management.
 *
 *  Copyright (C) 2006 Sony Computer Entertainment Inc.
 *  Copyright 2006 Sony Corp.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/cell-regs.h>
#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/setup.h>

#include "platform.h"

#if defined(DEBUG)
#define DBG udbg_printf
#else
#define DBG pr_devel
#endif

enum {
#if defined(CONFIG_PS3_DYNAMIC_DMA)
	USE_DYNAMIC_DMA = 1,
#else
	USE_DYNAMIC_DMA = 0,
#endif
};

enum {
	PAGE_SHIFT_4K = 12U,
	PAGE_SHIFT_64K = 16U,
	PAGE_SHIFT_16M = 24U,
};

static unsigned long make_page_sizes(unsigned long a, unsigned long b)
{
	return (a << 56) | (b << 48);
}
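
/*
 * The HV takes the two supported page sizes as log2 values packed into
 * the top bytes of a 64-bit word; for example,
 * make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K) encodes to
 * (24UL << 56) | (16UL << 48) = 0x1810000000000000UL.
 */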

enum {
	ALLOCATE_MEMORY_TRY_ALT_UNIT = 0x04,
	ALLOCATE_MEMORY_ADDR_ZERO = 0x08,
};

/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */

enum {
	HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
	HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};
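
/*
 * The first argument to lv1_construct_virtual_address_space() below is
 * this log2 size, so the hash table occupies 1 << CONFIG_PS3_HTAB_SIZE
 * bytes; for example, a value of 20 gives a 1 MB htab, the HV maximum.
 */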

/*============================================================================*/
/* virtual address space routines                                             */
/*============================================================================*/

/**
 * struct mem_region - memory region structure
 * @base: base address
 * @size: size in bytes
 * @offset: difference between base and rm.size
 * @destroy: flag if region should be destroyed upon shutdown
 */

struct mem_region {
	u64 base;
	u64 size;
	unsigned long offset;
	int destroy;
};

/**
 * struct map - address space state variables holder
 * @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 * @rm: real mode (bootmem) region
 * @r1: highmem region(s)
 *
 * The HV virtual address space (vas) allows for hotplug memory regions.
 * Memory regions can be created and destroyed in the vas at runtime.
 *
 * ps3 addresses
 * virt_addr: a cpu 'translated' effective address
 * phys_addr: an address in what Linux thinks is the physical address space
 * lpar_addr: an address in the HV virtual address space
 * bus_addr: an io controller 'translated' address on a device bus
 */

struct map {
	u64 total;
	u64 vas_id;
	u64 htab_size;
	struct mem_region rm;
	struct mem_region r1;
};
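
/*
 * Worked example of the layout (hypothetical values, for illustration
 * only): with a 128 MB real mode region (map.rm.size = 0x8000000) and
 * a high region created by the HV at lpar address 0x28000000,
 * map.r1.offset = 0x28000000 - 0x8000000 = 0x20000000.  Linux then
 * sees one contiguous physical space of rm.size + r1.size bytes, and
 * ps3_mm_phys_to_lpar() below rebases any physical address at or above
 * rm.size into the high region by adding r1.offset.
 */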

#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void __maybe_unused _debug_dump_map(const struct map *m,
	const char *func, int line)
{
	DBG("%s:%d: map.total     = %llxh\n", func, line, m->total);
	DBG("%s:%d: map.rm.size   = %llxh\n", func, line, m->rm.size);
	DBG("%s:%d: map.vas_id    = %llu\n", func, line, m->vas_id);
	DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size);
	DBG("%s:%d: map.r1.base   = %llxh\n", func, line, m->r1.base);
	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
	DBG("%s:%d: map.r1.size   = %llxh\n", func, line, m->r1.size);
}

static struct map map;

/**
 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
 * @phys_addr: linux physical address
 */

unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
{
	BUG_ON(is_kernel_addr(phys_addr));
	return (phys_addr < map.rm.size || phys_addr >= map.total)
		? phys_addr : phys_addr + map.r1.offset;
}

EXPORT_SYMBOL(ps3_mm_phys_to_lpar);

/**
 * ps3_mm_vas_create - create the virtual address space
 */

void __init ps3_mm_vas_create(unsigned long *htab_size)
{
	int result;
	u64 start_address;
	u64 size;
	u64 access_right;
	u64 max_page_size;
	u64 flags;

	result = lv1_query_logical_partition_address_region_info(0,
		&start_address, &size, &access_right, &max_page_size,
		&flags);

	if (result) {
		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
			"failed: %s\n", __func__, __LINE__,
			ps3_result(result));
		goto fail;
	}

	if (max_page_size < PAGE_SHIFT_16M) {
		DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__,
			max_page_size);
		goto fail;
	}

	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
			2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
			&map.vas_id, &map.htab_size);

	if (result) {
		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	result = lv1_select_virtual_address_space(map.vas_id);

	if (result) {
		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	*htab_size = map.htab_size;

	debug_dump_map(&map);

	return;

fail:
	panic("ps3_mm_vas_create failed");
}

/**
 * ps3_mm_vas_destroy - destroy the virtual address space
 */

void ps3_mm_vas_destroy(void)
{
	int result;

	if (map.vas_id) {
		result = lv1_select_virtual_address_space(0);
		result += lv1_destruct_virtual_address_space(map.vas_id);

		if (result) {
			lv1_panic(0);
		}

		map.vas_id = 0;
	}
}

static int ps3_mm_get_repository_highmem(struct mem_region *r)
{
	int result;

	/* Assume a single highmem region. */

	result = ps3_repository_read_highmem_info(0, &r->base, &r->size);

	if (result)
		goto zero_region;

	if (!r->base || !r->size) {
		result = -1;
		goto zero_region;
	}

	r->offset = r->base - map.rm.size;

	DBG("%s:%d: Found high region in repository: %llxh %llxh\n",
	    __func__, __LINE__, r->base, r->size);

	return 0;

zero_region:
	DBG("%s:%d: No high region in repository.\n", __func__, __LINE__);

	r->size = r->base = r->offset = 0;
	return result;
}

static int ps3_mm_set_repository_highmem(const struct mem_region *r)
{
	/* Assume a single highmem region. */

	return r ? ps3_repository_write_highmem_info(0, r->base, r->size) :
		ps3_repository_write_highmem_info(0, 0, 0);
}

/**
 * ps3_mm_region_create - create a memory region in the vas
 * @r: pointer to a struct mem_region to accept initialized values
 * @size: requested region size
 *
 * This implementation creates the region with the vas large page size.
 * @size is rounded down to a multiple of the vas large page size.
 */

static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
	int result;
	u64 muid;

	r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

	DBG("%s:%d requested  %lxh\n", __func__, __LINE__, size);
	DBG("%s:%d actual     %llxh\n", __func__, __LINE__, r->size);
	DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__,
		size - r->size, (size - r->size) / 1024 / 1024);

	if (r->size == 0) {
		DBG("%s:%d: size == 0\n", __func__, __LINE__);
		result = -1;
		goto zero_region;
	}

	result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
		ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

	if (result || r->base < map.rm.size) {
		DBG("%s:%d: lv1_allocate_memory failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto zero_region;
	}

	r->destroy = 1;
	r->offset = r->base - map.rm.size;
	return result;

zero_region:
	r->size = r->base = r->offset = 0;
	return result;
}
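
/*
 * Rounding example for ps3_mm_region_create() (hypothetical request):
 * asking for 200 MB (size = 0xc800000) aligns down to the 16 MB large
 * page size, _ALIGN_DOWN(0xc800000, 1 << PAGE_SHIFT_16M) = 0xc000000,
 * so a 192 MB region is created and the trailing 8 MB is not used.
 */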

/**
 * ps3_mm_region_destroy - destroy a memory region
 * @r: pointer to struct mem_region
 */

static void ps3_mm_region_destroy(struct mem_region *r)
{
	int result;

	if (!r->destroy) {
		return;
	}

	if (r->base) {
		result = lv1_release_memory(r->base);

		if (result) {
			lv1_panic(0);
		}

		r->size = r->base = r->offset = 0;
		map.total = map.rm.size;
	}

	ps3_mm_set_repository_highmem(NULL);
}

/*============================================================================*/
/* dma routines                                                               */
/*============================================================================*/

/**
 * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address
 */

static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
	unsigned long lpar_addr)
{
	if (lpar_addr >= map.rm.size)
		lpar_addr -= map.r1.offset;
	BUG_ON(lpar_addr < r->offset);
	BUG_ON(lpar_addr >= r->offset + r->len);
	return r->bus_addr + lpar_addr - r->offset;
}
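
/*
 * Continuing the hypothetical layout above: an lpar address of
 * 0x28000000 with map.rm.size = 0x8000000 and map.r1.offset =
 * 0x20000000 first collapses to the linear address 0x8000000, which
 * is then rebased into the ioc window as
 * r->bus_addr + 0x8000000 - r->offset.
 */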

#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
	const char *func, int line)
{
	DBG("%s:%d: dev        %llu:%llu\n", func, line, r->dev->bus_id,
		r->dev->dev_id);
	DBG("%s:%d: page_size  %u\n", func, line, r->page_size);
	DBG("%s:%d: bus_addr   %lxh\n", func, line, r->bus_addr);
	DBG("%s:%d: len        %lxh\n", func, line, r->len);
	DBG("%s:%d: offset     %lxh\n", func, line, r->offset);
}

/**
 * dma_chunk - A chunk of dma pages mapped by the io controller.
 * @region: The dma region that owns this chunk.
 * @lpar_addr: Starting lpar address of the area to map.
 * @bus_addr: Starting ioc bus address of the area to map.
 * @len: Length in bytes of the area to map.
 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
 * list of all chunks owned by the region.
 *
 * This implementation uses a very simple dma page manager
 * based on the dma_chunk structure.  This scheme assumes
 * that all drivers use very well behaved dma ops.
 */

struct dma_chunk {
	struct ps3_dma_region *region;
	unsigned long lpar_addr;
	unsigned long bus_addr;
	unsigned long len;
	struct list_head link;
	unsigned int usage_count;
};

#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk(const struct dma_chunk *c, const char *func,
	int line)
{
	DBG("%s:%d: r.dev        %llu:%llu\n", func, line,
		c->region->dev->bus_id, c->region->dev->dev_id);
	DBG("%s:%d: r.bus_addr   %lxh\n", func, line, c->region->bus_addr);
	DBG("%s:%d: r.page_size  %u\n", func, line, c->region->page_size);
	DBG("%s:%d: r.len        %lxh\n", func, line, c->region->len);
	DBG("%s:%d: r.offset     %lxh\n", func, line, c->region->offset);
	DBG("%s:%d: c.lpar_addr  %lxh\n", func, line, c->lpar_addr);
	DBG("%s:%d: c.bus_addr   %lxh\n", func, line, c->bus_addr);
	DBG("%s:%d: c.len        %lxh\n", func, line, c->len);
}

static struct dma_chunk *dma_find_chunk(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	struct dma_chunk *c;
	unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + bus_addr - aligned_bus,
					      1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection */
		if (aligned_bus >= c->bus_addr &&
		    aligned_bus + aligned_len <= c->bus_addr + c->len)
			return c;

		/* below */
		if (aligned_bus + aligned_len <= c->bus_addr)
			continue;

		/* above */
		if (aligned_bus >= c->bus_addr + c->len)
			continue;

		/* we don't handle the multi-chunk case for now */
		dma_dump_chunk(c);
		BUG();
	}
	return NULL;
}
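
/*
 * Matching example (hypothetical numbers): with 64 KB dma pages and an
 * existing chunk at bus_addr 0x100000, len 0x20000, a lookup for
 * bus_addr 0x110000, len 0x8000 aligns to [0x110000, 0x120000), which
 * lies entirely inside the chunk, so that chunk is returned.  A range
 * that only partially overlaps a chunk trips the BUG() above.
 */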

static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
	unsigned long lpar_addr, unsigned long len)
{
	struct dma_chunk *c;
	unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar,
					      1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection */
		if (c->lpar_addr <= aligned_lpar &&
		    aligned_lpar < c->lpar_addr + c->len) {
			if (aligned_lpar + aligned_len <= c->lpar_addr + c->len)
				return c;
			else {
				dma_dump_chunk(c);
				BUG();
			}
		}
		/* below */
		if (aligned_lpar + aligned_len <= c->lpar_addr) {
			continue;
		}
		/* above */
		if (c->lpar_addr + c->len <= aligned_lpar) {
			continue;
		}
	}
	return NULL;
}

static int dma_sb_free_chunk(struct dma_chunk *c)
{
	int result = 0;

	if (c->bus_addr) {
		result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
			c->region->dev->dev_id, c->bus_addr, c->len);
		BUG_ON(result);
	}

	kfree(c);
	return result;
}

static int dma_ioc0_free_chunk(struct dma_chunk *c)
{
	int result = 0;
	int iopage;
	unsigned long offset;
	struct ps3_dma_region *r = c->region;

	DBG("%s:start\n", __func__);
	for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
		offset = (1 << r->page_size) * iopage;
		/* put INVALID entry */
		result = lv1_put_iopte(0,
				       c->bus_addr + offset,
				       c->lpar_addr + offset,
				       r->ioid,
				       0);
		DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
		    c->bus_addr + offset,
		    c->lpar_addr + offset,
		    r->ioid);

		if (result) {
			DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
			    __LINE__, ps3_result(result));
		}
	}
	kfree(c);
	DBG("%s:end\n", __func__);
	return result;
}

/**
 * dma_sb_map_pages - Maps dma pages into the io controller bus address space.
 * @r: Pointer to a struct ps3_dma_region.
 * @phys_addr: Starting physical address of the area to map.
 * @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
 *
 * This is the lowest level dma mapping routine, and is the one that will
 * make the HV call to add the pages into the io controller address space.
 */

static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	    unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
{
	int result;
	struct dma_chunk *c;

	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);
	c->len = len;

	BUG_ON(iopte_flag != 0xf800000000000000UL);
	result = lv1_map_device_dma_region(c->region->dev->bus_id,
					   c->region->dev->dev_id, c->lpar_addr,
					   c->bus_addr, c->len, iopte_flag);
	if (result) {
		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail_map;
	}

	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	return 0;

fail_map:
	kfree(c);
fail_alloc:
	*c_out = NULL;
	DBG(" <- %s:%d\n", __func__, __LINE__);
	return result;
}

static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
			      unsigned long len, struct dma_chunk **c_out,
			      u64 iopte_flag)
{
	int result;
	struct dma_chunk *c, *last;
	int iopage, pages;
	unsigned long offset;

	DBG(KERN_ERR "%s: phy=%#lx, lpar%#lx, len=%#lx\n", __func__,
	    phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->len = len;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	/* allocate IO address */
	if (list_empty(&r->chunk_list.head)) {
		/* first one */
		c->bus_addr = r->bus_addr;
	} else {
		/* derive from last bus addr */
		last = list_entry(r->chunk_list.head.next,
				  struct dma_chunk, link);
		c->bus_addr = last->bus_addr + last->len;
		DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
		    last->bus_addr, last->len);
	}

	/* FIXME: check whether length exceeds region size */

	/* build ioptes for the area */
	pages = len >> r->page_size;
	DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__,
	    r->page_size, r->len, pages, iopte_flag);
	for (iopage = 0; iopage < pages; iopage++) {
		offset = (1 << r->page_size) * iopage;
		result = lv1_put_iopte(0,
				       c->bus_addr + offset,
				       c->lpar_addr + offset,
				       r->ioid,
				       iopte_flag);
		if (result) {
			pr_warning("%s:%d: lv1_put_iopte failed: %s\n",
				   __func__, __LINE__, ps3_result(result));
			goto fail_map;
		}
		DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
		    iopage, c->bus_addr + offset, c->lpar_addr + offset,
		    r->ioid);
	}

	/* be sure that last allocated one is inserted at head */
	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	DBG("%s: end\n", __func__);
	return 0;

fail_map:
	/* unwind the successfully built ioptes, recomputing each offset */
	for (iopage--; 0 <= iopage; iopage--) {
		offset = (1 << r->page_size) * iopage;
		lv1_put_iopte(0,
			      c->bus_addr + offset,
			      c->lpar_addr + offset,
			      r->ioid,
			      0);
	}
	kfree(c);
fail_alloc:
	*c_out = NULL;
	return result;
}

/**
 * dma_sb_region_create - Create a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region create routine, and is the one that
 * will make the HV call to create the region.
 */

static int dma_sb_region_create(struct ps3_dma_region *r)
{
	int result;
	u64 bus_addr;

	DBG(" -> %s:%d:\n", __func__, __LINE__);

	BUG_ON(!r);

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
	    __LINE__, r->len, r->page_size, r->offset);

	BUG_ON(!r->len);
	BUG_ON(!r->page_size);
	BUG_ON(!r->region_ops);

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		roundup_pow_of_two(r->len), r->page_size, r->region_type,
		&bus_addr);
	r->bus_addr = bus_addr;

	if (result) {
		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}

	return result;
}

static int dma_ioc0_region_create(struct ps3_dma_region *r)
{
	int result;
	u64 bus_addr;

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_io_segment(0,
					 r->len,
					 r->page_size,
					 &bus_addr);
	r->bus_addr = bus_addr;
	if (result) {
		DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}
	DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
	    r->len, r->page_size, r->bus_addr);
	return result;
}

/**
 * dma_sb_region_free - Free a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region free routine, and is the one that
 * will make the HV call to free the region.
 */

static int dma_sb_region_free(struct ps3_dma_region *r)
{
	int result;
	struct dma_chunk *c;
	struct dma_chunk *tmp;

	BUG_ON(!r);

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_sb_free_chunk(c);
	}

	result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->bus_addr = 0;

	return result;
}

static int dma_ioc0_region_free(struct ps3_dma_region *r)
{
	int result;
	struct dma_chunk *c, *n;

	DBG("%s: start\n", __func__);
	list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_ioc0_free_chunk(c);
	}

	result = lv1_release_io_segment(0, r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_release_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->bus_addr = 0;
	DBG("%s: end\n", __func__);

	return result;
}

/**
 * dma_sb_map_area - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This is the common dma mapping routine.
 */

static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	   unsigned long len, dma_addr_t *bus_addr,
	   u64 iopte_flag)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
					      1 << r->page_size);
	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

	if (!USE_DYNAMIC_DMA) {
		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
		DBG(" -> %s:%d\n", __func__, __LINE__);
		DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
			virt_addr);
		DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
			phys_addr);
		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
			lpar_addr);
		DBG("%s:%d len       %lxh\n", __func__, __LINE__, len);
		DBG("%s:%d bus_addr  %llxh (%lxh)\n", __func__, __LINE__,
		*bus_addr, len);
	}

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, *bus_addr, len);

	if (c) {
		DBG("%s:%d: reusing mapped chunk\n", __func__, __LINE__);
		dma_dump_chunk(c);
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}

	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}

static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	     unsigned long len, dma_addr_t *bus_addr,
	     u64 iopte_flag)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
					      1 << r->page_size);

	DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__,
	    virt_addr, len);
	DBG(KERN_ERR "%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
	    phys_addr, aligned_phys, aligned_len);

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);

	if (c) {
		/* FIXME */
		BUG();
		*bus_addr = c->bus_addr + phys_addr - aligned_phys;
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
				    iopte_flag);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}
	*bus_addr = c->bus_addr + phys_addr - aligned_phys;
	DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__,
	    virt_addr, phys_addr, aligned_phys, *bus_addr);
	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}

/**
 * dma_sb_unmap_area - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This is the common dma unmap routine.
 */

static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
	unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
			- aligned_bus, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %llxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_sb_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return 0;
}

static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
			dma_addr_t bus_addr, unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len);
	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
							1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
						      - aligned_bus,
						      1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %llxh\n",
		    __func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
		    __func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
		    __func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
		    __func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_ioc0_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	DBG("%s: end\n", __func__);
	return 0;
}

/**
 * dma_sb_region_create_linear - Setup a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine creates an HV dma region for the device and maps all available
 * ram into the io controller bus address space.
 */

static int dma_sb_region_create_linear(struct ps3_dma_region *r)
{
	int result;
	unsigned long virt_addr, len;
	dma_addr_t tmp;

	if (r->len > 16*1024*1024) {	/* FIXME: need proper fix */
		/* force 16M dma pages for linear mapping */
		if (r->page_size != PS3_DMA_16M) {
			pr_info("%s:%d: forcing 16M pages for linear map\n",
				__func__, __LINE__);
			r->page_size = PS3_DMA_16M;
			r->len = _ALIGN_UP(r->len, 1 << r->page_size);
		}
	}

	result = dma_sb_region_create(r);
	BUG_ON(result);

	if (r->offset < map.rm.size) {
		/* Map (part of) 1st RAM chunk */
		virt_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;
		if (len > r->len)
			len = r->len;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
			CBE_IOPTE_M);
		BUG_ON(result);
	}

	if (r->offset + r->len > map.rm.size) {
		/* Map (part of) 2nd RAM chunk */
		virt_addr = map.rm.size;
		len = r->len;
		if (r->offset >= map.rm.size)
			virt_addr += r->offset - map.rm.size;
		else
			len -= map.rm.size - r->offset;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
			CBE_IOPTE_M);
		BUG_ON(result);
	}

	return result;
}

/**
 * dma_sb_region_free_linear - Free a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine will unmap all mapped areas and free the HV dma region.
 */

static int dma_sb_region_free_linear(struct ps3_dma_region *r)
{
	int result;
	dma_addr_t bus_addr;
	unsigned long len, lpar_addr;

	if (r->offset < map.rm.size) {
		/* Unmap (part of) 1st RAM chunk */
		lpar_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;
		if (len > r->len)
			len = r->len;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);
		BUG_ON(result);
	}

	if (r->offset + r->len > map.rm.size) {
		/* Unmap (part of) 2nd RAM chunk */
		lpar_addr = map.r1.base;
		len = r->len;
		if (r->offset >= map.rm.size)
			lpar_addr += r->offset - map.rm.size;
		else
			len -= map.rm.size - r->offset;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);
		BUG_ON(result);
	}

	result = dma_sb_region_free(r);
	BUG_ON(result);

	return result;
}

/**
 * dma_sb_map_area_linear - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This routine just returns the corresponding bus address.  Actual mapping
 * occurs in dma_sb_region_create_linear().
 */

static int dma_sb_map_area_linear(struct ps3_dma_region *r,
	unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
	return 0;
}

/**
 * dma_sb_unmap_area_linear - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This routine does nothing.  Unmapping occurs in dma_sb_region_free_linear().
 */

static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
	dma_addr_t bus_addr, unsigned long len)
{
	return 0;
}

static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = {
	.create = dma_sb_region_create,
	.free = dma_sb_region_free,
	.map = dma_sb_map_area,
	.unmap = dma_sb_unmap_area
};

static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
	.create = dma_sb_region_create_linear,
	.free = dma_sb_region_free_linear,
	.map = dma_sb_map_area_linear,
	.unmap = dma_sb_unmap_area_linear
};

static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
	.create = dma_ioc0_region_create,
	.free = dma_ioc0_region_free,
	.map = dma_ioc0_map_area,
	.unmap = dma_ioc0_unmap_area
};

int ps3_dma_region_init(struct ps3_system_bus_device *dev,
	struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
	enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
	unsigned long lpar_addr;
	int result;

	lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;

	r->dev = dev;
	r->page_size = page_size;
	r->region_type = region_type;
	r->offset = lpar_addr;
	if (r->offset >= map.rm.size)
		r->offset -= map.r1.offset;
	r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size);

	dev->core.dma_mask = &r->dma_mask;

	result = dma_set_mask_and_coherent(&dev->core, DMA_BIT_MASK(32));

	if (result < 0) {
		dev_err(&dev->core, "%s:%d: dma_set_mask_and_coherent failed: %d\n",
			__func__, __LINE__, result);
		return result;
	}

	switch (dev->dev_type) {
	case PS3_DEVICE_TYPE_SB:
		r->region_ops = (USE_DYNAMIC_DMA)
			? &ps3_dma_sb_region_ops
			: &ps3_dma_sb_region_linear_ops;
		break;
	case PS3_DEVICE_TYPE_IOC0:
		r->region_ops = &ps3_dma_ioc0_region_ops;
		break;
	default:
		BUG();
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ps3_dma_region_init);

int ps3_dma_region_create(struct ps3_dma_region *r)
{
	BUG_ON(!r);
	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->create);
	return r->region_ops->create(r);
}
EXPORT_SYMBOL(ps3_dma_region_create);

int ps3_dma_region_free(struct ps3_dma_region *r)
{
	BUG_ON(!r);
	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->free);
	return r->region_ops->free(r);
}
EXPORT_SYMBOL(ps3_dma_region_free);

int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
}

int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
	unsigned long len)
{
	return r->region_ops->unmap(r, bus_addr, len);
}
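
/*
 * Typical lifecycle of the exported dma API above; a minimal sketch
 * with illustrative arguments, not taken from any particular driver:
 *
 *	struct ps3_dma_region r;
 *	dma_addr_t bus_addr;
 *
 *	ps3_dma_region_init(dev, &r, PS3_DMA_64K, PS3_DMA_OTHER, NULL, 0);
 *	ps3_dma_region_create(&r);
 *	ps3_dma_map(&r, virt_addr, len, &bus_addr,
 *		    CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M);
 *	...
 *	ps3_dma_unmap(&r, bus_addr, len);
 *	ps3_dma_region_free(&r);
 */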

/*============================================================================*/
/* system startup routines                                                    */
/*============================================================================*/

/**
 * ps3_mm_init - initialize the address space state variables
 */

void __init ps3_mm_init(void)
{
	int result;

	DBG(" -> %s:%d\n", __func__, __LINE__);

	result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
		&map.total);

	if (result)
		panic("ps3_repository_read_mm_info() failed");

	map.rm.offset = map.rm.base;
	map.vas_id = map.htab_size = 0;

	/* this implementation assumes map.rm.base is zero */

	BUG_ON(map.rm.base);
	BUG_ON(!map.rm.size);

	/* Check if we got the highmem region from an earlier boot step */

	if (ps3_mm_get_repository_highmem(&map.r1)) {
		result = ps3_mm_region_create(&map.r1, map.total - map.rm.size);

		if (!result)
			ps3_mm_set_repository_highmem(&map.r1);
	}

	/* correct map.total for the real total amount of memory we use */
	map.total = map.rm.size + map.r1.size;

	if (!map.r1.size) {
		DBG("%s:%d: No highmem region found\n", __func__, __LINE__);
	} else {
		DBG("%s:%d: Adding highmem region: %llxh %llxh\n",
			__func__, __LINE__, map.rm.size,
			map.total - map.rm.size);
		memblock_add(map.rm.size, map.total - map.rm.size);
	}

	DBG(" <- %s:%d\n", __func__, __LINE__);
}

/**
 * ps3_mm_shutdown - final cleanup of address space
 */

void ps3_mm_shutdown(void)
{
	ps3_mm_region_destroy(&map.r1);
}