// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2015 Intel Deutschland GmbH
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * Author: Johannes Berg <johannes@sipsolutions.net>
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/devcoredump.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/workqueue.h>

static struct class devcd_class;

/* global disable flag, for security purposes */
static bool devcd_disabled;

/* if data isn't read by userspace after 5 minutes then delete it */
#define DEVCD_TIMEOUT	(HZ * 60 * 5)

struct devcd_entry {
	struct device devcd_dev;
	void *data;
	size_t datalen;
	/*
	 * Here the mutex is required to serialize accesses to the del_wk
	 * work between kernel and user space, which happens because devcd
	 * is added with device_add() and that sends a uevent to user
	 * space. A user-space process reads the uevent and writes to the
	 * devcd fd, which leads to a call to devcd_data_write() that can
	 * try to modify the work before devcoredump has even initialized
	 * or queued it:
	 *
	 *        cpu0(X)                                 cpu1(Y)
	 *
	 *        dev_coredump() uevent sent to user space
	 *        device_add()  ======================> user space process Y reads the
	 *                                              uevent and writes to the devcd
	 *                                              fd, which results in a call to
	 *
	 *                                             devcd_data_write()
	 *                                               mod_delayed_work()
	 *                                                 try_to_grab_pending()
	 *                                                   del_timer()
	 *                                                     debug_assert_init()
	 *       INIT_DELAYED_WORK()
	 *       schedule_delayed_work()
	 *
	 * The mutex alone, however, is not enough to prevent the del_wk
	 * work from being scheduled again after it has been flushed by a
	 * call to devcd_free(), as shown below:
	 *
	 *	disabled_store()
	 *        devcd_free()
	 *          mutex_lock()             devcd_data_write()
	 *          flush_delayed_work()
	 *          mutex_unlock()
	 *                                   mutex_lock()
	 *                                   mod_delayed_work()
	 *                                   mutex_unlock()
	 *
	 * So the delete_work flag is required as well.
	 */
	struct mutex mutex;
	bool delete_work;
	struct module *owner;
	ssize_t (*read)(char *buffer, loff_t offset, size_t count,
			void *data, size_t datalen);
	void (*free)(void *data);
	struct delayed_work del_wk;
	struct device *failing_dev;
};

static struct devcd_entry *dev_to_devcd(struct device *dev)
{
	return container_of(dev, struct devcd_entry, devcd_dev);
}

static void devcd_dev_release(struct device *dev)
{
	struct devcd_entry *devcd = dev_to_devcd(dev);

	devcd->free(devcd->data);
	module_put(devcd->owner);

	/*
	 * this seems racy, but I don't see a notifier or such on
	 * a struct device to know when it goes away?
	 */
	if (devcd->failing_dev->kobj.sd)
		sysfs_delete_link(&devcd->failing_dev->kobj, &dev->kobj,
				  "devcoredump");

	put_device(devcd->failing_dev);
	kfree(devcd);
}

static void devcd_del(struct work_struct *wk)
{
	struct devcd_entry *devcd;

	devcd = container_of(wk, struct devcd_entry, del_wk.work);

	device_del(&devcd->devcd_dev);
	put_device(&devcd->devcd_dev);
}

static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct devcd_entry *devcd = dev_to_devcd(dev);

	return devcd->read(buffer, offset, count, devcd->data, devcd->datalen);
}

static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr,
				char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct devcd_entry *devcd = dev_to_devcd(dev);

	mutex_lock(&devcd->mutex);
	if (!devcd->delete_work) {
		devcd->delete_work = true;
		mod_delayed_work(system_wq, &devcd->del_wk, 0);
	}
	mutex_unlock(&devcd->mutex);

	return count;
}

static struct bin_attribute devcd_attr_data = {
	.attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, },
	.size = 0,
	.read = devcd_data_read,
	.write = devcd_data_write,
};

static struct bin_attribute *devcd_dev_bin_attrs[] = {
	&devcd_attr_data, NULL,
};

static const struct attribute_group devcd_dev_group = {
	.bin_attrs = devcd_dev_bin_attrs,
};

static const struct attribute_group *devcd_dev_groups[] = {
	&devcd_dev_group, NULL,
};
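
/*
 * Illustrative user-space flow (a sketch; "devcd1" is a hypothetical
 * instance name, and the attribute is root-only per S_IRUSR | S_IWUSR):
 * reading "data" retrieves the dump, and writing anything to it marks
 * the dump as consumed, which queues immediate deletion via
 * devcd_data_write():
 *
 *	cat /sys/class/devcoredump/devcd1/data > dump.bin
 *	echo 1 > /sys/class/devcoredump/devcd1/data
 */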

static int devcd_free(struct device *dev, void *data)
{
	struct devcd_entry *devcd = dev_to_devcd(dev);

	mutex_lock(&devcd->mutex);
	if (!devcd->delete_work)
		devcd->delete_work = true;

	flush_delayed_work(&devcd->del_wk);
	mutex_unlock(&devcd->mutex);
	return 0;
}

static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, "%d\n", devcd_disabled);
}

/*
 *	disabled_store()                                	worker()
 *	 class_for_each_device(&devcd_class,
 *		NULL, NULL, devcd_free)
 *         ...
 *         ...
 *	   while ((dev = class_dev_iter_next(&iter))
 *                                                             devcd_del()
 *                                                               device_del()
 *                                                                 put_device() <- last reference
 *             error = fn(dev, data)                           devcd_dev_release()
 *             devcd_free(dev, data)                           kfree(devcd)
 *             mutex_lock(&devcd->mutex);
 *
 * In the diagram above, disabled_store() appears to race with a
 * concurrently running devcd_del(): it would acquire devcd->mutex after
 * the last reference had been dropped with put_device() and the devcd
 * memory freed with kfree(), causing a use-after-free. This cannot
 * happen, however, because fn(dev, data) runs while the iterator holds
 * its own reference to the device via the klist_node, so the
 * put_device() in devcd_del() is never the last reference during the
 * iteration.
 */

static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
			      const char *buf, size_t count)
{
	long tmp = simple_strtol(buf, NULL, 10);

	/*
	 * This essentially makes the attribute write-once, since you can't
	 * go back to not having it disabled. This is intentional, it serves
	 * as a system lockdown feature.
	 */
	if (tmp != 1)
		return -EINVAL;

	devcd_disabled = true;

	class_for_each_device(&devcd_class, NULL, NULL, devcd_free);

	return count;
}
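
/*
 * Illustrative lockdown from user space (a sketch): "1" is the only
 * value accepted, and the flag cannot be cleared again until reboot:
 *
 *	echo 1 > /sys/class/devcoredump/disabled
 */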
static CLASS_ATTR_RW(disabled);

static struct attribute *devcd_class_attrs[] = {
	&class_attr_disabled.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devcd_class);

static struct class devcd_class = {
	.name		= "devcoredump",
	.owner		= THIS_MODULE,
	.dev_release	= devcd_dev_release,
	.dev_groups	= devcd_dev_groups,
	.class_groups	= devcd_class_groups,
};

static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
			   void *data, size_t datalen)
{
	return memory_read_from_buffer(buffer, count, &offset, data, datalen);
}

static void devcd_freev(void *data)
{
	vfree(data);
}

/**
 * dev_coredumpv - create device coredump with vmalloc data
 * @dev: the struct device for the crashed device
 * @data: vmalloc data containing the device coredump
 * @datalen: length of the data
 * @gfp: allocation flags
 *
 * This function takes ownership of the vmalloc'ed data and will free
 * it when it is no longer used. See dev_coredumpm() for more information.
 */
void dev_coredumpv(struct device *dev, void *data, size_t datalen,
		   gfp_t gfp)
{
	dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, devcd_freev);
}
EXPORT_SYMBOL_GPL(dev_coredumpv);
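
/*
 * Minimal caller sketch (illustrative; "my_dev", "dump_len" and
 * fill_dump_buffer() are hypothetical). Ownership of the vmalloc'ed
 * buffer passes to the framework, which releases it with vfree() once
 * the dump is read or times out:
 *
 *	void *dump = vmalloc(dump_len);
 *
 *	if (dump) {
 *		fill_dump_buffer(dump, dump_len);
 *		dev_coredumpv(my_dev, dump, dump_len, GFP_KERNEL);
 *	}
 *
 * After the call, the buffer must not be touched or freed by the driver.
 */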

static int devcd_match_failing(struct device *dev, const void *failing)
{
	struct devcd_entry *devcd = dev_to_devcd(dev);

	return devcd->failing_dev == failing;
}

/**
 * devcd_free_sgtable - free all the memory of the given scatterlist table
 * (i.e. both pages and scatterlist instances)
 * NOTE: if two tables allocated with devcd_alloc_sgtable are chained using
 * the sg_chain function, then this function should be called only once on
 * the chained table
 * @data: pointer to sg_table to free
 */
static void devcd_free_sgtable(void *data)
{
	_devcd_free_sgtable(data);
}

/**
 * devcd_read_from_sgtable - copy data from sg_table to a given buffer
 * and return the number of bytes read
 * @buffer: the buffer to copy the data to
 * @buf_len: the length of the buffer
 * @data: the scatterlist table to copy from
 * @offset: start copying @offset bytes from the head of the data
 *	in the given scatterlist
 * @data_len: the length of the data in the sg_table
 */
static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
				       size_t buf_len, void *data,
				       size_t data_len)
{
	struct scatterlist *table = data;

	if (offset > data_len)
		return -EINVAL;

	if (offset + buf_len > data_len)
		buf_len = data_len - offset;
	return sg_pcopy_to_buffer(table, sg_nents(table), buffer, buf_len,
				  offset);
}

/**
 * dev_coredumpm - create device coredump with read/free methods
 * @dev: the struct device for the crashed device
 * @owner: the module that contains the read/free functions, use %THIS_MODULE
 * @data: data cookie for the @read/@free functions
 * @datalen: length of the data
 * @gfp: allocation flags
 * @read: function to read from the given buffer
 * @free: function to free the given buffer
 *
 * Creates a new device coredump for the given device. If a previous one hasn't
 * been read yet, the new coredump is discarded. The data lifetime is determined
 * by the device coredump framework and when it is no longer needed the @free
 * function will be called to free the data.
 */
void dev_coredumpm(struct device *dev, struct module *owner,
		   void *data, size_t datalen, gfp_t gfp,
		   ssize_t (*read)(char *buffer, loff_t offset, size_t count,
				   void *data, size_t datalen),
		   void (*free)(void *data))
{
	static atomic_t devcd_count = ATOMIC_INIT(0);
	struct devcd_entry *devcd;
	struct device *existing;

	if (devcd_disabled)
		goto free;

	existing = class_find_device(&devcd_class, NULL, dev,
				     devcd_match_failing);
	if (existing) {
		put_device(existing);
		goto free;
	}

	if (!try_module_get(owner))
		goto free;

	devcd = kzalloc(sizeof(*devcd), gfp);
	if (!devcd)
		goto put_module;

	devcd->owner = owner;
	devcd->data = data;
	devcd->datalen = datalen;
	devcd->read = read;
	devcd->free = free;
	devcd->failing_dev = get_device(dev);
	devcd->delete_work = false;

	mutex_init(&devcd->mutex);
	device_initialize(&devcd->devcd_dev);

	dev_set_name(&devcd->devcd_dev, "devcd%d",
		     atomic_inc_return(&devcd_count));
	devcd->devcd_dev.class = &devcd_class;

	mutex_lock(&devcd->mutex);
	dev_set_uevent_suppress(&devcd->devcd_dev, true);
	if (device_add(&devcd->devcd_dev))
		goto put_device;

	if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj,
			      "failing_device"))
		/* nothing - symlink will be missing */;

	if (sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj,
			      "devcoredump"))
		/* nothing - symlink will be missing */;

	dev_set_uevent_suppress(&devcd->devcd_dev, false);
	kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
	INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
	schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
	mutex_unlock(&devcd->mutex);
	return;
 put_device:
	put_device(&devcd->devcd_dev);
	mutex_unlock(&devcd->mutex);
 put_module:
	module_put(owner);
 free:
	free(data);
}
EXPORT_SYMBOL_GPL(dev_coredumpm);
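
/*
 * Sketch of a dump with driver-defined read/free callbacks
 * (illustrative; my_dump_read(), my_dump_free(), "my_dev", "dump" and
 * "dump_len" are hypothetical):
 *
 *	static ssize_t my_dump_read(char *buffer, loff_t offset, size_t count,
 *				    void *data, size_t datalen)
 *	{
 *		return memory_read_from_buffer(buffer, count, &offset,
 *					       data, datalen);
 *	}
 *
 *	static void my_dump_free(void *data)
 *	{
 *		kfree(data);
 *	}
 *
 *	dev_coredumpm(my_dev, THIS_MODULE, dump, dump_len, GFP_KERNEL,
 *		      my_dump_read, my_dump_free);
 */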

/**
 * dev_coredumpsg - create device coredump that uses scatterlist as data
 * parameter
 * @dev: the struct device for the crashed device
 * @table: the dump data
 * @datalen: length of the data
 * @gfp: allocation flags
 *
 * Creates a new device coredump for the given device. If a previous one hasn't
 * been read yet, the new coredump is discarded. The data lifetime is determined
 * by the device coredump framework and when it is no longer needed
 * it will free the data.
 */
void dev_coredumpsg(struct device *dev, struct scatterlist *table,
		    size_t datalen, gfp_t gfp)
{
	dev_coredumpm(dev, NULL, table, datalen, gfp, devcd_read_from_sgtable,
		      devcd_free_sgtable);
}
EXPORT_SYMBOL_GPL(dev_coredumpsg);
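
/*
 * Illustrative scatterlist-based dump (a sketch; the allocation and fill
 * helpers are assumptions, not part of this file: the NOTE on
 * devcd_free_sgtable() above mentions devcd_alloc_sgtable as the
 * matching allocator, and fill_sg_dump() is hypothetical):
 *
 *	struct scatterlist *table = devcd_alloc_sgtable(dump_len);
 *
 *	if (table) {
 *		fill_sg_dump(table, dump_len);
 *		dev_coredumpsg(my_dev, table, dump_len, GFP_KERNEL);
 *	}
 *
 * Ownership of the table passes to devcoredump, which frees it through
 * devcd_free_sgtable() when the dump is consumed or expires.
 */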

static int __init devcoredump_init(void)
{
	return class_register(&devcd_class);
}
__initcall(devcoredump_init);

static void __exit devcoredump_exit(void)
{
	class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
	class_unregister(&devcd_class);
}
__exitcall(devcoredump_exit);