/*
 * Copyright IBM Corp. 2008
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>

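/*
 * Test the protection of the storage at addr with the TPROT instruction
 * and extract the resulting condition code via ipm/srl:
 * 0 = fetch and store allowed, 1 = fetch only, 2 = no access. If the
 * address is not backed by storage, the program check is caught by the
 * exception table entry and rc keeps its -EFAULT initialization.
 */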
static inline int tprot(unsigned long addr)
{
	int rc = -EFAULT;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}

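/* 2GB, the 31-bit addressing limit */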
#define ADDR2G	(1ULL << 31)

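/*
 * Scan storage in steps of one increment (rzm) and merge consecutive
 * increments that report the same tprot result into a single mem_chunk.
 * Scanning stops at the reported memory size or once the chunk array
 * is full.
 */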
static void find_memory_chunks(struct mem_chunk chunk[])
{
	unsigned long long memsize, rnmax, rzm;
	unsigned long addr = 0, size;
	int i = 0, type;

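	/*
	 * SCLP reports the size of a storage increment (rzm) and the
	 * maximum increment number (rnmax); their product is an upper
	 * bound for the installed memory size.
	 */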
	rzm = sclp_get_rzm();
	rnmax = sclp_get_rnmax();
	memsize = rzm * rnmax;
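	/* Assume 128KB storage increments if SCLP did not report a size */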
	if (!rzm)
		rzm = 1ULL << 17;
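	/* A 31-bit kernel cannot address storage above 2GB */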
	if (sizeof(long) == 4) {
		rzm = min(ADDR2G, rzm);
		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
	}
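	/*
	 * Walk through memory and extend the current chunk as long as
	 * consecutive increments report the same protection type.
	 */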
	do {
		size = 0;
		type = tprot(addr);
		do {
			size += rzm;
			if (memsize && addr + size >= memsize)
				break;
		} while (type == tprot(addr + size));
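		/* Record only accessible (read/write or read-only) storage */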
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
			chunk[i].addr = addr;
			chunk[i].size = size;
			chunk[i].type = type;
			i++;
		}
		addr += size;
	} while (addr < memsize && i < MEMORY_CHUNKS);
}

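/*
 * Fill the given array (MEMORY_CHUNKS entries) with the detected memory
 * layout; entries that are not needed stay zeroed.
 */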
void detect_memory_layout(struct mem_chunk chunk[])
{
	unsigned long flags, cr0;

	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
	/*
	 * Disable IRQs, DAT and low address protection so tprot does the
	 * right thing and we don't get scheduled away with low address
	 * protection disabled.
	 */
	flags = __raw_local_irq_stnsm(0xf8);
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);
	find_memory_chunks(chunk);
	__ctl_load(cr0, 0, 0);
	__raw_local_irq_ssm(flags);
}
EXPORT_SYMBOL(detect_memory_layout);