/**
 * @file event_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * This is the global event buffer that the user-space
 * daemon reads from. The event buffer is an untyped array
 * of unsigned longs. Entries are prefixed by the
 * escape value ESCAPE_CODE followed by an identifying code.
 */
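
/*
 * Illustrative sketch, not part of the original file: escaped records
 * announce themselves with ESCAPE_CODE, while plain samples are written
 * unescaped. A CPU switch record followed by samples would land in the
 * array roughly as:
 *
 *	ESCAPE_CODE, CPU_SWITCH_CODE, cpu,
 *	eip, event,
 *	eip, event,
 *	...
 *
 * The exact record layouts are defined by the code values shared with
 * the user-space daemon.
 */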

#include <linux/vmalloc.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/dcookies.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

#include "oprof.h"
#include "event_buffer.h"
#include "oprofile_stats.h"

DEFINE_MUTEX(buffer_mutex);

static unsigned long buffer_opened;
static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
static unsigned long *event_buffer;
static unsigned long buffer_size;
static unsigned long buffer_watershed;
static size_t buffer_pos;
/* atomic_t because wait_event checks it outside of buffer_mutex */
static atomic_t buffer_ready = ATOMIC_INIT(0);

/*
 * Add an entry to the event buffer. When we get near to the end we
 * wake up the process sleeping on the read() of the file. To protect
 * the event_buffer this function may only be called when buffer_mutex
 * is held.
 */
void add_event_entry(unsigned long value)
{
	/*
	 * This shouldn't happen since all workqueues or handlers are
	 * canceled or flushed before the event buffer is freed.
	 */
	if (!event_buffer) {
		WARN_ON_ONCE(1);
		return;
	}

	if (buffer_pos == buffer_size) {
		atomic_inc(&oprofile_stats.event_lost_overflow);
		return;
	}

	event_buffer[buffer_pos] = value;
	if (++buffer_pos == buffer_size - buffer_watershed) {
		atomic_set(&buffer_ready, 1);
		wake_up(&buffer_wait);
	}
}
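
/*
 * A hedged producer sketch (how callers elsewhere in oprofile are
 * expected to behave, not code from this file): buffer_mutex must be
 * held across the whole record so that the escape sequence and its
 * payload stay contiguous in the buffer:
 *
 *	mutex_lock(&buffer_mutex);
 *	add_event_entry(ESCAPE_CODE);
 *	add_event_entry(CPU_SWITCH_CODE);
 *	add_event_entry(cpu);
 *	mutex_unlock(&buffer_mutex);
 */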


/* Wake up the waiting process if any. This happens
 * on "echo 0 >/dev/oprofile/enable" so the daemon
 * processes the data remaining in the event buffer.
 */
void wake_up_buffer_waiter(void)
{
	mutex_lock(&buffer_mutex);
	atomic_set(&buffer_ready, 1);
	wake_up(&buffer_wait);
	mutex_unlock(&buffer_mutex);
}


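/*
 * Allocate the event buffer using the size and watershed configured
 * via oprofilefs; the watershed must be strictly smaller than the
 * buffer or the configuration is rejected.
 */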
int alloc_event_buffer(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&oprofilefs_lock, flags);
	buffer_size = oprofile_buffer_size;
	buffer_watershed = oprofile_buffer_watershed;
	raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);

	if (buffer_watershed >= buffer_size)
		return -EINVAL;

	buffer_pos = 0;
	event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
	if (!event_buffer)
		return -ENOMEM;

	return 0;
}


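/*
 * Free the event buffer. Taking buffer_mutex lets a concurrent read()
 * observe event_buffer as NULL rather than a dangling pointer.
 */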
void free_event_buffer(void)
{
	mutex_lock(&buffer_mutex);
	vfree(event_buffer);
	buffer_pos = 0;
	event_buffer = NULL;
	mutex_unlock(&buffer_mutex);
}


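/*
 * Only one CAP_SYS_ADMIN-capable process may have the event buffer
 * open at a time; the buffer_opened bit enforces the single opener.
 */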
static int event_buffer_open(struct inode *inode, struct file *file)
{
	int err = -EPERM;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (test_and_set_bit_lock(0, &buffer_opened))
		return -EBUSY;

	/* Register as a user of dcookies
	 * to ensure they persist for the lifetime of
	 * the open event file
	 */
	err = -EINVAL;
	file->private_data = dcookie_register();
	if (!file->private_data)
		goto out;

	if ((err = oprofile_setup()))
		goto fail;

	/* NB: the actual start happens from userspace
	 * echo 1 >/dev/oprofile/enable
	 */

	return nonseekable_open(inode, file);

fail:
	dcookie_unregister(file->private_data);
out:
	__clear_bit_unlock(0, &buffer_opened);
	return err;
}


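/*
 * Tear down in the reverse order of open: stop and shut down
 * profiling, drop our dcookie user, then release the open slot.
 */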
static int event_buffer_release(struct inode *inode, struct file *file)
{
	oprofile_stop();
	oprofile_shutdown();
	dcookie_unregister(file->private_data);
	buffer_pos = 0;
	atomic_set(&buffer_ready, 0);
	__clear_bit_unlock(0, &buffer_opened);
	return 0;
}


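/*
 * Blocking read of the whole event buffer. The returned byte count
 * reflects how much data had actually been buffered.
 */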
static ssize_t event_buffer_read(struct file *file, char __user *buf,
				 size_t count, loff_t *offset)
{
	int retval = -EINVAL;
	size_t const max = buffer_size * sizeof(unsigned long);

	/* handling partial reads is more trouble than it's worth */
	if (count != max || *offset)
		return -EINVAL;

	wait_event_interruptible(buffer_wait, atomic_read(&buffer_ready));

	if (signal_pending(current))
		return -EINTR;

	/* can't currently happen */
	if (!atomic_read(&buffer_ready))
		return -EAGAIN;

	mutex_lock(&buffer_mutex);

	/* May happen if the buffer is freed during pending reads. */
	if (!event_buffer) {
		retval = -EINTR;
		goto out;
	}

	atomic_set(&buffer_ready, 0);

	retval = -EFAULT;

	count = buffer_pos * sizeof(unsigned long);

	if (copy_to_user(buf, event_buffer, count))
		goto out;

	retval = count;
	buffer_pos = 0;

out:
	mutex_unlock(&buffer_mutex);
	return retval;
}
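
/*
 * A minimal user-space consumer, sketched under the assumption that
 * oprofilefs is mounted at /dev/oprofile (so this file appears as
 * /dev/oprofile/buffer) and that BUF_SLOTS is a hypothetical constant
 * matching the kernel's buffer_size:
 *
 *	unsigned long data[BUF_SLOTS];
 *	int fd = open("/dev/oprofile/buffer", O_RDONLY);
 *	ssize_t n = read(fd, data, sizeof(data));  // must request the full size
 *	// n bytes of ESCAPE_CODE-delimited records and samples follow
 */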

const struct file_operations event_buffer_fops = {
	.open		= event_buffer_open,
	.release	= event_buffer_release,
	.read		= event_buffer_read,
	.llseek		= no_llseek,
};