/**
 * @file op_dname.c
 * dentry stack walking
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author Philippe Elie
 */

#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/mman.h>
#include <linux/file.h>

#include "oprofile.h"
#include "op_dcache.h"
#include "op_util.h"

/* --------- device routines ------------- */

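/* dname stack used while winding/unwinding dentries, and the string
 * pool backing the component hash map; the pool is carved out of the
 * same rvmalloc'd region as the hash index (see oprof_init_hashmap) */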
uint op_dname_top;
struct qstr ** op_dname_stack;
char * op_pool_pos;
char * op_pool_start;
char * op_pool_end;

static ulong hash_map_open;
static struct op_hash_index * hash_map;

unsigned long is_map_ready(void)
{
	return hash_map_open;
}

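/* allocate the dname stack and the hash map region; the hash index
 * array is followed in the same region by the string pool */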
int oprof_init_hashmap(void)
{
	uint i;

	op_dname_stack = kmalloc(DNAME_STACK_MAX * sizeof(struct qstr *), GFP_KERNEL);
	if (!op_dname_stack)
		return -EFAULT;
	op_dname_top = 0;
	memset(op_dname_stack, 0, DNAME_STACK_MAX * sizeof(struct qstr *));

	hash_map = rvmalloc(PAGE_ALIGN(OP_HASH_MAP_SIZE));
	if (!hash_map)
		return -EFAULT;

	for (i = 0; i < OP_HASH_MAP_NR; ++i) {
		hash_map[i].name = 0;
		hash_map[i].parent = -1;
	}

	op_pool_start = (char *)(hash_map + OP_HASH_MAP_NR);
	op_pool_end = op_pool_start + POOL_SIZE;
	op_pool_pos = op_pool_start;

	/* Ensure that hash map entry zero is never used; we use this
	 * value as the end-of-path terminator */
	hash_map[0].name = alloc_in_pool("/", 1);
	hash_map[0].parent = 0;

	return 0;
}

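/* free the dname stack and the hash map region */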
void oprof_free_hashmap(void)
{
	kfree(op_dname_stack);
	rvfree(hash_map, PAGE_ALIGN(OP_HASH_MAP_SIZE));
}

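/* allow only one opener of the hash map device at a time */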
int oprof_hash_map_open(void)
{
	if (test_and_set_bit(0, &hash_map_open))
		return -EBUSY;

	return 0;
}

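/* mark the hash map device as closed again */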
int oprof_hash_map_release(void)
{
	if (!hash_map_open)
		return -EFAULT;

	clear_bit(0, &hash_map_open);
	return 0;
}

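/* map the hash map (index array plus string pool) into the opener's
 * address space, one page at a time; writable mappings are refused */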
int oprof_hash_map_mmap(struct file * file, struct vm_area_struct * vma)
{
	ulong start = (ulong)vma->vm_start;
	ulong page, pos;
	ulong size = (ulong)(vma->vm_end - vma->vm_start);

	if (size > PAGE_ALIGN(OP_HASH_MAP_SIZE) || (vma->vm_flags & VM_WRITE) || GET_VM_OFFSET(vma))
		return -EINVAL;

	pos = (ulong)hash_map;
	while (size > 0) {
		page = kvirt_to_pa(pos);
		if (remap_page_range(start, page, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;
		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}


#ifndef NEED_2_2_DENTRIES
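/* walk from the given dentry up to the process root, pushing each
 * path component onto the dname stack; returns 0 if the dentry has
 * been deleted, 1 otherwise */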
int wind_dentries_2_4(struct dentry * dentry, struct vfsmount * vfsmnt, struct dentry * root, struct vfsmount * rootmnt)
{
	struct dentry * d = dentry;
	struct vfsmount * v = vfsmnt;

	/* wind the dentries onto the stack pages */
	for (;;) {
		/* deleted ? */
		if (!IS_ROOT(d) && list_empty(&d->d_hash))
			return 0;

		/* the root */
		if (d == root && v == rootmnt)
			break;

		if (d == v->mnt_root || IS_ROOT(d)) {
			if (v->mnt_parent == v)
				break;
			/* cross the mount point */
			d = v->mnt_mountpoint;
			v = v->mnt_parent;
		}

		push_dname(&d->d_name);

		d = d->d_parent;
	}

	return 1;
}

/* called with note_lock held */
uint do_path_hash_2_4(struct dentry * dentry, struct vfsmount * vfsmnt)
{
	uint value;
	struct vfsmount * rootmnt;
	struct dentry * root;

	read_lock(&current->fs->lock);
	rootmnt = mntget(current->fs->rootmnt);
	root = dget(current->fs->root);
	read_unlock(&current->fs->lock);

	spin_lock(&dcache_lock);

	value = do_hash(dentry, vfsmnt, root, rootmnt);

	spin_unlock(&dcache_lock);
	dput(root);
	mntput(rootmnt);
	return value;
}
#endif /* NEED_2_2_DENTRIES */

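/* hash each component of the dentry's path into the hash map,
 * chaining entries through their parent index; returns the index of
 * the final component, or (uint)-1 on failure */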
/* called with note_lock held */
uint do_hash(struct dentry * dentry, struct vfsmount * vfsmnt, struct dentry * root, struct vfsmount * rootmnt)
{
	struct qstr * dname;
	uint value = -1;
	uint firsthash;
	uint incr;
	uint parent = 0;
	struct op_hash_index * entry;

	if (!wind_dentries(dentry, vfsmnt, root, rootmnt))
		goto out;

	/* unwind and hash */

	while ((dname = pop_dname())) {
		/* If N is prime, value lies in [0, N) and incr = max(1, value),
		 * then iterating value = (value + incr) % N covers the whole
		 * range [0, N) in N iterations */
		incr = firsthash = value = name_hash(dname->name, dname->len, parent);
		if (incr == 0)
			incr = 1;

	retry:
		entry = &hash_map[value];
		/* existing entry ? */
		if (streq(get_from_pool(entry->name), dname->name)
			&& entry->parent == parent)
			goto next;

		/* new entry ? */
		if (entry->parent == -1) {
			if (add_hash_entry(entry, parent, dname->name, dname->len))
				goto fullpool;
			goto next;
		}

		/* nope, find another place in the table */
		value = (value + incr) % OP_HASH_MAP_NR;

		if (value == firsthash)
			goto fulltable;

		goto retry;
	next:
		parent = value;
	}

out:
	op_dname_top = 0;
	return value;
fullpool:
	printk(KERN_ERR "oprofile: string pool exhausted.\n");
	value = -1;
	goto out;
fulltable:
	printk(KERN_ERR "oprofile: component hash table full :(\n");
	value = -1;
	goto out;
}