/* drivers/misc/memory_state_time.c
 *
 * Copyright (C) 2016 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
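
/*
 * Overview: this driver accounts the time the system spends in each
 * (memory frequency, bandwidth bucket) state. Frequency and bandwidth
 * sources report changes through registered callbacks; the updates are
 * applied from a single-threaded workqueue, and the accumulated times
 * are exported through /sys/kernel/memory_state_time/show_stat.
 */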

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/hashtable.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/memory-state-time.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/time.h>
#include <linux/timekeeping.h>
#include <linux/workqueue.h>

#define KERNEL_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define KERNEL_ATTR_RW(_name) \
static struct kobj_attribute _name##_attr = \
	__ATTR(_name, 0644, _name##_show, _name##_store)

#define FREQ_HASH_BITS 4
DECLARE_HASHTABLE(freq_hash_table, FREQ_HASH_BITS);

static DEFINE_MUTEX(mem_lock);

#define TAG "memory_state_time"
#define BW_NODE "/soc/memory-state-time"
#define FREQ_TBL "freq-tbl"
#define BW_TBL "bw-buckets"
#define NUM_SOURCES "num-sources"
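
/*
 * The tables come from the device tree. A node of the expected shape
 * (the property values below are illustrative, not from a real
 * platform):
 *
 *	memory-state-time {
 *		compatible = "memory-state-time";
 *		freq-tbl = <400000 800000 1600000>;
 *		bw-buckets = <100 200 400 800>;
 *		num-sources = <2>;
 *	};
 *
 * freq-tbl needs at least LOWEST_FREQ + 1 entries, since the initial
 * frequency is freq_buckets[LOWEST_FREQ].
 */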

#define LOWEST_FREQ 2

static int curr_bw;
static int curr_freq;
static u32 *bw_buckets;
static u32 *freq_buckets;
static int num_freqs;
static int num_buckets;
static int registered_bw_sources;
static u64 last_update;
static bool init_success;
static struct workqueue_struct *memory_wq;
static u32 num_sources = 10;
static int *bandwidths;

struct freq_entry {
	int freq;
	u64 *buckets; /* Bandwidth buckets. */
	struct hlist_node hash;
};

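/*
 * One deferred update queued on memory_wq: 'value' carries the new
 * frequency or bandwidth, 'time_now' the boot time at which it was
 * reported, and 'id' the reporting bandwidth source. The 'lock' member
 * is currently unused.
 */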
struct queue_container {
	struct work_struct update_state;
	int value;
	u64 time_now;
	int id;
	struct mutex *lock;
};

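/*
 * Map a bandwidth value to the index of the first bucket whose maximum
 * exceeds it; bandwidths beyond the largest maximum land in the last
 * bucket, and 0 is returned if no bucket table was loaded.
 */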
static int find_bucket(int bw)
{
	int i;

	if (bw_buckets != NULL) {
		for (i = 0; i < num_buckets; i++) {
			if (bw_buckets[i] > bw) {
				pr_debug("Found bucket %d for bandwidth %d\n",
					i, bw);
				return i;
			}
		}
		return num_buckets - 1;
	}
	return 0;
}

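/*
 * Return the time elapsed since the previous accounting update and
 * advance last_update. time_now comes from ktime_get_boot_ns(), so the
 * value is in nanoseconds despite the 'ms' name. Called under mem_lock.
 */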
static u64 get_time_diff(u64 time_now)
{
	u64 ms;

	ms = time_now - last_update;
	last_update = time_now;
	return ms;
}

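/*
 * sysfs show handler for /sys/kernel/memory_state_time/show_stat.
 * Each line prints a frequency followed by the time accumulated in each
 * of its bandwidth buckets, e.g. (illustrative values):
 *
 *	800000 1200345 0 0 993
 */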
static ssize_t show_stat_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int i, j;
	int len = 0;
	struct freq_entry *freq_entry;

	for (i = 0; i < num_freqs; i++) {
		hash_for_each_possible(freq_hash_table, freq_entry, hash,
				freq_buckets[i]) {
			if (freq_entry->freq == freq_buckets[i]) {
				len += scnprintf(buf + len, PAGE_SIZE - len,
						"%d ", freq_buckets[i]);
				if (len >= PAGE_SIZE)
					break;
				for (j = 0; j < num_buckets; j++) {
					len += scnprintf(buf + len,
							PAGE_SIZE - len,
							"%llu ",
							freq_entry->buckets[j]);
				}
				len += scnprintf(buf + len, PAGE_SIZE - len,
						"\n");
			}
		}
	}
	pr_debug("Current Time: %llu\n", ktime_get_boot_ns());
	return len;
}
KERNEL_ATTR_RO(show_stat);

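/*
 * Charge the time since the last update to the bucket matching the
 * current (frequency, bandwidth) state; called with mem_lock held,
 * before curr_freq or curr_bw is changed.
 */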
static void update_table(u64 time_now)
{
	struct freq_entry *freq_entry;

	pr_debug("Last known bw %d freq %d\n", curr_bw, curr_freq);
	hash_for_each_possible(freq_hash_table, freq_entry, hash, curr_freq) {
		if (curr_freq == freq_entry->freq) {
			freq_entry->buckets[find_bucket(curr_bw)]
					+= get_time_diff(time_now);
			break;
		}
	}
}

static bool freq_exists(int freq)
{
	int i;

	for (i = 0; i < num_freqs; i++) {
		if (freq == freq_buckets[i])
			return true;
	}
	return false;
}

static int calculate_total_bw(int bw, int index)
{
	int i;
	int total_bw = 0;

	pr_debug("memory_state_time New bw %d for id %d\n", bw, index);
	bandwidths[index] = bw;
	for (i = 0; i < registered_bw_sources; i++)
		total_bw += bandwidths[i];
	return total_bw;
}

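/*
 * Workqueue handlers: under mem_lock, account the elapsed time against
 * the outgoing state, then record the new frequency or the new total
 * bandwidth and free the queue container.
 */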
static void freq_update_do_work(struct work_struct *work)
{
	struct queue_container *freq_state_update
			= container_of(work, struct queue_container,
			update_state);
	if (freq_state_update) {
		mutex_lock(&mem_lock);
		update_table(freq_state_update->time_now);
		curr_freq = freq_state_update->value;
		mutex_unlock(&mem_lock);
		kfree(freq_state_update);
	}
}

static void bw_update_do_work(struct work_struct *work)
{
	struct queue_container *bw_state_update
			= container_of(work, struct queue_container,
			update_state);
	if (bw_state_update) {
		mutex_lock(&mem_lock);
		update_table(bw_state_update->time_now);
		curr_bw = calculate_total_bw(bw_state_update->value,
				bw_state_update->id);
		mutex_unlock(&mem_lock);
		kfree(bw_state_update);
	}
}

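/*
 * Update callbacks handed out to registered sources. Each stamps the
 * change with the current boot time and defers the accounting to
 * memory_wq, so callers never contend on mem_lock directly.
 */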
static void memory_state_freq_update(struct memory_state_update_block *ub,
		int value)
{
	if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
		if (freq_exists(value) && init_success) {
			struct queue_container *freq_container
				= kmalloc(sizeof(struct queue_container),
				GFP_KERNEL);
			if (!freq_container)
				return;
			INIT_WORK(&freq_container->update_state,
					freq_update_do_work);
			freq_container->time_now = ktime_get_boot_ns();
			freq_container->value = value;
			pr_debug("Scheduling freq update in work queue\n");
			queue_work(memory_wq, &freq_container->update_state);
		} else {
			pr_debug("Freq does not exist.\n");
		}
	}
}

static void memory_state_bw_update(struct memory_state_update_block *ub,
		int value)
{
	if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
		if (init_success) {
			struct queue_container *bw_container
				= kmalloc(sizeof(struct queue_container),
				GFP_KERNEL);
			if (!bw_container)
				return;
			INIT_WORK(&bw_container->update_state,
					bw_update_do_work);
			bw_container->time_now = ktime_get_boot_ns();
			bw_container->value = value;
			bw_container->id = ub->id;
			pr_debug("Scheduling bandwidth update in work queue\n");
			queue_work(memory_wq, &bw_container->update_state);
		}
	}
}

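/*
 * A sketch of how a client (e.g. a memory-controller or devfreq driver)
 * might feed this driver; the caller and variable names are
 * hypothetical:
 *
 *	struct memory_state_update_block *mb =
 *			memory_state_register_bandwidth_source();
 *	...
 *	if (mb)
 *		mb->update_call(mb, new_bw);
 *
 * 'new_bw' must be in the same units as the bw-buckets table.
 * Registration returns NULL if the config option is disabled or, for
 * bandwidth sources, once num-sources blocks have been handed out.
 */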
struct memory_state_update_block *memory_state_register_frequency_source(void)
{
	struct memory_state_update_block *block;

	if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
		pr_debug("Allocating frequency source\n");
		block = kmalloc(sizeof(struct memory_state_update_block),
					GFP_KERNEL);
		if (!block)
			return NULL;
		block->update_call = memory_state_freq_update;
		return block;
	}
	pr_err("Config option disabled.\n");
	return NULL;
}
EXPORT_SYMBOL_GPL(memory_state_register_frequency_source);

struct memory_state_update_block *memory_state_register_bandwidth_source(void)
{
	struct memory_state_update_block *block;

	if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
		pr_debug("Allocating bandwidth source %d\n",
				registered_bw_sources);
		block = kmalloc(sizeof(struct memory_state_update_block),
					GFP_KERNEL);
		if (!block)
			return NULL;
		block->update_call = memory_state_bw_update;
		if (registered_bw_sources < num_sources) {
			block->id = registered_bw_sources++;
		} else {
			pr_err("Unable to allocate source; max number reached\n");
			kfree(block);
			return NULL;
		}
		return block;
	}
	pr_err("Config option disabled.\n");
	return NULL;
}
EXPORT_SYMBOL_GPL(memory_state_register_bandwidth_source);

/* Buckets are designated by their maximum bandwidth.
 * Reads the number of bandwidth sources and the bucket table supported
 * by the device from the device tree. Returns 0 on success or a
 * negative errno.
 */
static int get_bw_buckets(struct device *dev)
{
	int ret, lenb;
	struct device_node *node = dev->of_node;

	of_property_read_u32(node, NUM_SOURCES, &num_sources);
	if (!of_find_property(node, BW_TBL, &lenb)) {
		pr_err("Missing %s property\n", BW_TBL);
		return -ENODATA;
	}

	bandwidths = devm_kzalloc(dev,
			sizeof(*bandwidths) * num_sources, GFP_KERNEL);
	if (!bandwidths)
		return -ENOMEM;
	lenb /= sizeof(*bw_buckets);
	bw_buckets = devm_kzalloc(dev, lenb * sizeof(*bw_buckets),
			GFP_KERNEL);
	if (!bw_buckets) {
		devm_kfree(dev, bandwidths);
		return -ENOMEM;
	}
	ret = of_property_read_u32_array(node, BW_TBL, bw_buckets,
			lenb);
	if (ret < 0) {
		devm_kfree(dev, bandwidths);
		devm_kfree(dev, bw_buckets);
		pr_err("Unable to read bandwidth table from device tree.\n");
		return ret;
	}

	curr_bw = 0;
	num_buckets = lenb;
	return 0;
}

/* Adds a struct freq_entry node to the hashtable for each supported
 * frequency. Returns 0 on success or a negative errno.
 */
static int freq_buckets_init(struct device *dev)
{
	struct freq_entry *freq_entry;
	int i;
	int ret, lenf;
	struct device_node *node = dev->of_node;

	if (!of_find_property(node, FREQ_TBL, &lenf)) {
		pr_err("Missing %s property\n", FREQ_TBL);
		return -ENODATA;
	}

	lenf /= sizeof(*freq_buckets);
	freq_buckets = devm_kzalloc(dev, lenf * sizeof(*freq_buckets),
			GFP_KERNEL);
	if (!freq_buckets)
		return -ENOMEM;
	pr_debug("freqs found len %d\n", lenf);
	ret = of_property_read_u32_array(node, FREQ_TBL, freq_buckets,
			lenf);
	if (ret < 0) {
		devm_kfree(dev, freq_buckets);
		pr_err("Unable to read frequency table from device tree.\n");
		return ret;
	}
	pr_debug("ret freq %d\n", ret);

	num_freqs = lenf;
	curr_freq = freq_buckets[LOWEST_FREQ];

	for (i = 0; i < num_freqs; i++) {
		freq_entry = devm_kzalloc(dev, sizeof(struct freq_entry),
				GFP_KERNEL);
		if (!freq_entry)
			return -ENOMEM;
		freq_entry->buckets = devm_kzalloc(dev, sizeof(u64)*num_buckets,
				GFP_KERNEL);
		if (!freq_entry->buckets) {
			devm_kfree(dev, freq_entry);
			return -ENOMEM;
		}
		pr_debug("memory_state_time Adding freq to ht %d\n",
				freq_buckets[i]);
		freq_entry->freq = freq_buckets[i];
		hash_add(freq_hash_table, &freq_entry->hash, freq_buckets[i]);
	}
	return 0;
}

struct kobject *memory_kobj;
EXPORT_SYMBOL_GPL(memory_kobj);

static struct attribute *memory_attrs[] = {
	&show_stat_attr.attr,
	NULL
};

static struct attribute_group memory_attr_group = {
	.attrs = memory_attrs,
};

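/*
 * Probe: read both device-tree tables, then start accounting from the
 * current boot time.
 */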
static int memory_state_time_probe(struct platform_device *pdev)
{
	int error;

	error = get_bw_buckets(&pdev->dev);
	if (error)
		return error;
	error = freq_buckets_init(&pdev->dev);
	if (error)
		return error;
	last_update = ktime_get_boot_ns();
	init_success = true;

	pr_debug("memory_state_time initialized with num_freqs %d\n",
			num_freqs);
	return 0;
}

static const struct of_device_id match_table[] = {
	{ .compatible = "memory-state-time" },
	{}
};

static struct platform_driver memory_state_time_driver = {
	.probe = memory_state_time_probe,
	.driver = {
		.name = "memory-state-time",
		.of_match_table = match_table,
		.owner = THIS_MODULE,
	},
};

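/*
 * Set up the workqueue and the sysfs node before registering the
 * platform driver; the error path unwinds in reverse order through the
 * labels below.
 */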
static int __init memory_state_time_init(void)
{
	int error;

	hash_init(freq_hash_table);
	memory_wq = create_singlethread_workqueue("memory_wq");
	if (!memory_wq) {
		pr_err("Unable to create workqueue.\n");
		return -EINVAL;
	}
	/*
	 * Create sys/kernel directory for memory_state_time.
	 */
	memory_kobj = kobject_create_and_add(TAG, kernel_kobj);
	if (!memory_kobj) {
		pr_err("Unable to allocate memory_kobj for sysfs directory.\n");
		error = -ENOMEM;
		goto wq;
	}
	error = sysfs_create_group(memory_kobj, &memory_attr_group);
	if (error) {
		pr_err("Unable to create sysfs folder.\n");
		goto kobj;
	}

	error = platform_driver_register(&memory_state_time_driver);
	if (error) {
		pr_err("Unable to register memory_state_time platform driver.\n");
		goto group;
	}
	return 0;

group:	sysfs_remove_group(memory_kobj, &memory_attr_group);
kobj:	kobject_put(memory_kobj);
wq:	destroy_workqueue(memory_wq);
	return error;
}
module_init(memory_state_time_init);