/*
 * Copyright (C) IBM Corporation, 2014, 2017
 * Anton Blanchard, Rashmica Gupta.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#define pr_fmt(fmt) "memtrace: " fmt

#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <asm/machdep.h>
#include <asm/debugfs.h>

/* This enables us to keep track of the memory removed from each node. */
struct memtrace_entry {
	void *mem;
	u64 start;
	u64 size;
	u32 nid;
	struct dentry *dir;
	char name[16];
};

static u64 memtrace_size;

static struct memtrace_entry *memtrace_array;
static unsigned int memtrace_array_nr;

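/* Read from a node's trace buffer through its debugfs "trace" file. */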
static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	struct memtrace_entry *ent = filp->private_data;

	return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
}

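/* Check that [start, start + size) lies entirely within this entry's region. */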
static bool valid_memtrace_range(struct memtrace_entry *dev,
				 unsigned long start, unsigned long size)
{
	if ((start >= dev->start) &&
	    ((start + size) <= (dev->start + dev->size)))
		return true;

	return false;
}

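/* Map a node's trace buffer into userspace, non-cached. */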
static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct memtrace_entry *dev = filp->private_data;

	if (!valid_memtrace_range(dev, vma->vm_pgoff << PAGE_SHIFT, size))
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start,
			    vma->vm_pgoff + (dev->start >> PAGE_SHIFT),
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

static const struct file_operations memtrace_fops = {
	.llseek = default_llseek,
	.read	= memtrace_read,
	.mmap	= memtrace_mmap,
	.open	= simple_open,
};

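/* walk_memory_range() callback: fail if any memory block is not online. */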
static int check_memblock_online(struct memory_block *mem, void *arg)
{
	if (mem->state != MEM_ONLINE)
		return -1;

	return 0;
}

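/* walk_memory_range() callback: force each memory block to the state in arg. */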
static int change_memblock_state(struct memory_block *mem, void *arg)
{
	unsigned long state = (unsigned long)arg;

	mem->state = state;

	return 0;
}

/* called with device_hotplug_lock held */
static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
{
	u64 end_pfn = start_pfn + nr_pages - 1;

	if (walk_memory_range(start_pfn, end_pfn, NULL,
			      check_memblock_online))
		return false;

	walk_memory_range(start_pfn, end_pfn, (void *)MEM_GOING_OFFLINE,
			  change_memblock_state);

	if (offline_pages(start_pfn, nr_pages)) {
		walk_memory_range(start_pfn, end_pfn, (void *)MEM_ONLINE,
				  change_memblock_state);
		return false;
	}

	walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE,
			  change_memblock_state);

	return true;
}

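/*
 * Carve out "size" bytes of trace memory from the top of a node by
 * offlining the pages and removing them from the kernel. Returns the
 * physical address of the allocation, or 0 on failure.
 */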
static u64 memtrace_alloc_node(u32 nid, u64 size)
{
	u64 start_pfn, end_pfn, nr_pages, pfn;
	u64 base_pfn;
	u64 bytes = memory_block_size_bytes();

	if (!NODE_DATA(nid) || !node_spanned_pages(nid))
		return 0;

	start_pfn = node_start_pfn(nid);
	end_pfn = node_end_pfn(nid);
	nr_pages = size >> PAGE_SHIFT;

	/* Trace memory needs to be aligned to the size */
	end_pfn = round_down(end_pfn - nr_pages, nr_pages);

	lock_device_hotplug();
	for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
		if (memtrace_offline_pages(nid, base_pfn, nr_pages)) {
			/*
			 * Remove memory in memory block size chunks so that
			 * iomem resources are always split to the same size and
			 * we never try to remove memory that spans two iomem
			 * resources.
			 */
			end_pfn = base_pfn + nr_pages;
			for (pfn = base_pfn; pfn < end_pfn; pfn += bytes >> PAGE_SHIFT)
				remove_memory(nid, pfn << PAGE_SHIFT, bytes);
			unlock_device_hotplug();
			return base_pfn << PAGE_SHIFT;
		}
	}
	unlock_device_hotplug();

	return 0;
}

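/*
 * Allocate one trace region per online node and record each successful
 * allocation in memtrace_array.
 */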
static int memtrace_init_regions_runtime(u64 size)
{
	u32 nid;
	u64 m;

	memtrace_array = kcalloc(num_online_nodes(),
				 sizeof(struct memtrace_entry), GFP_KERNEL);
	if (!memtrace_array) {
		pr_err("Failed to allocate memtrace_array\n");
		return -EINVAL;
	}

	for_each_online_node(nid) {
		m = memtrace_alloc_node(nid, size);

		/*
		 * A node might not have any local memory, so warn but
		 * continue on.
		 */
		if (!m) {
			pr_err("Failed to allocate trace memory on node %d\n", nid);
			continue;
		}

		pr_info("Allocated trace memory on node %d at 0x%016llx\n", nid, m);

		memtrace_array[memtrace_array_nr].start = m;
		memtrace_array[memtrace_array_nr].size = size;
		memtrace_array[memtrace_array_nr].nid = nid;
		memtrace_array_nr++;
	}

	return 0;
}

static struct dentry *memtrace_debugfs_dir;

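/*
 * Map each trace region and expose it as a per-node debugfs directory
 * containing "trace", "start" and "size" files.
 */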
static int memtrace_init_debugfs(void)
{
	int ret = 0;
	int i;

	for (i = 0; i < memtrace_array_nr; i++) {
		struct dentry *dir;
		struct memtrace_entry *ent = &memtrace_array[i];

		ent->mem = ioremap(ent->start, ent->size);
		/* Warn but continue on */
		if (!ent->mem) {
			pr_err("Failed to map trace memory at 0x%llx\n",
			       ent->start);
			ret = -1;
			continue;
		}

		snprintf(ent->name, 16, "%08x", ent->nid);
		dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);
		if (!dir)
			return -1;

		ent->dir = dir;
		debugfs_create_file("trace", 0400, dir, ent, &memtrace_fops);
		debugfs_create_x64("start", 0400, dir, &ent->start);
		debugfs_create_x64("size", 0400, dir, &ent->size);
	}

	return ret;
}

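/*
 * "enable" write handler: carve out "val" bytes of trace memory per node.
 * The size must be a non-zero multiple of the memory block size, and
 * trace memory can only be set up once.
 */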
static int memtrace_enable_set(void *data, u64 val)
{
	if (memtrace_size)
		return -EINVAL;

	if (!val)
		return -EINVAL;

	/* Make sure size is aligned to a memory block */
	if (val & (memory_block_size_bytes() - 1))
		return -EINVAL;

	if (memtrace_init_regions_runtime(val))
		return -EINVAL;

	if (memtrace_init_debugfs())
		return -EINVAL;

	memtrace_size = val;

	return 0;
}

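/* "enable" read handler: report the configured trace size. */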
static int memtrace_enable_get(void *data, u64 *val)
{
	*val = memtrace_size;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(memtrace_init_fops, memtrace_enable_get,
			memtrace_enable_set, "0x%016llx\n");

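/* Create the top-level memtrace debugfs directory and its "enable" file. */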
static int memtrace_init(void)
{
	memtrace_debugfs_dir = debugfs_create_dir("memtrace",
						  powerpc_debugfs_root);
	if (!memtrace_debugfs_dir)
		return -1;

	debugfs_create_file("enable", 0600, memtrace_debugfs_dir,
			    NULL, &memtrace_init_fops);

	return 0;
}
machine_device_initcall(powernv, memtrace_init);