1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright 2011-2024 Google LLC
4 */
5 #include "android_configfs_uevent.h"
6 #include <linux/device.h>
7 #include <linux/device/class.h>
8 #include <linux/err.h>
9 #include <linux/kdev_t.h>
10 #include <linux/spinlock.h>
11
/*
 * Singleton opts pointer published by android_device_create() and consumed
 * by the function-device helpers below; guarded by opts_lock.
 */
static struct android_uevent_opts *android_opts;

/* Protects android_opts and the mutable fields of android_uevent_opts. */
static DEFINE_SPINLOCK(opts_lock);
/* Allocates unique ids for the "android%d" class device names. */
static DEFINE_IDA(android_ida);
16
/*
 * android_work() - worker that translates connected/configured state
 * transitions into KOBJ_CHANGE uevents (USB_STATE=...) on the android_usb
 * class device. Scheduled by __android_set_connected() and
 * __android_set_configured().
 */
static void android_work(struct work_struct *data)
{
	struct android_uevent_opts *opts = container_of(data,
			struct android_uevent_opts, work);

	/* NULL-terminated env arrays handed to kobject_uevent_env() */
	char *disconnected_strs[2] = { "USB_STATE=DISCONNECTED", NULL };
	char *connected_strs[2] = { "USB_STATE=CONNECTED", NULL };
	char *configured_strs[2] = { "USB_STATE=CONFIGURED", NULL };
	unsigned long flags;
	bool disconnected = false;
	bool connected = false;
	bool configured = false;
	bool uevent_sent = false;
	struct device *dev;

	/*
	 * I believe locking is important due to the fact that we are checking
	 * several conditions here, and if the state changes after checking one
	 * we could potentially drop a uevent to userspace. Additionally, we
	 * want to prevent teardown until after events are sent.
	 */
	spin_lock_irqsave(&opts_lock, flags);

	/*
	 * If the device does not exist, it means we were torn down after
	 * scheduling this work, but before the work ran, so return to prevent
	 * use after free.
	 */
	if (!opts->dev) {
		spin_unlock_irqrestore(&opts_lock, flags);
		return;
	}

	/*
	 * Cache the dev pointer in the locked area in case it gets cleared by
	 * android_device_destroy() after we release the lock. The call to
	 * flush_work in the cleanup path ensures we finish our work prior to
	 * destroying the dev which we have cached the pointer to. Ideally,
	 * this would be handled differently (using reference counting), but
	 * for now this should work.
	 */
	dev = opts->dev;

	/*
	 * Edge-detect connect/disconnect by comparing against the
	 * sw_connected shadow of the last state reported to userspace.
	 */
	if (opts->connected != opts->sw_connected) {
		if (opts->connected)
			connected = true;
		else
			disconnected = true;
		opts->sw_connected = opts->connected;
	}
	/* CONFIGURED is reported whenever the flag is currently set. */
	if (opts->configured)
		configured = true;

	spin_unlock_irqrestore(&opts_lock, flags);

	/*
	 * This is an abuse of uevents, however the android userspace parses
	 * the uevent string for information instead of reading the state from
	 * sysfs entries. This is one of several things about this driver which
	 * would need to change to upstream it. In an attempt to keep the
	 * existing userspace api unmodified until either an upstream solution
	 * is implemented or this functionality is otherwise replaced, leave
	 * the pre-existing logic in place.
	 */
	if (connected) {
		if (kobject_uevent_env(&dev->kobj, KOBJ_CHANGE,
					connected_strs)) {
			dev_err(dev, "Failed to send connected uevent\n");
		} else {
			dev_dbg(dev, "sent uevent %s\n", connected_strs[0]);
			uevent_sent = true;
		}
	}

	if (configured) {
		if (kobject_uevent_env(&dev->kobj, KOBJ_CHANGE,
					configured_strs)) {
			dev_err(dev, "Failed to send configured uevent\n");
		} else {
			dev_dbg(dev, "sent uevent %s\n", configured_strs[0]);
			uevent_sent = true;
		}
	}

	if (disconnected) {
		if (kobject_uevent_env(&dev->kobj, KOBJ_CHANGE,
					disconnected_strs)) {
			dev_err(dev, "Failed to send disconnected uevent\n");
		} else {
			dev_dbg(dev, "sent uevent %s\n", disconnected_strs[0]);
			uevent_sent = true;
		}
	}

	if (!uevent_sent) {
		/*
		 * This is an odd case, but not necessarily an error- the state
		 * of the device may have changed since the work was scheduled,
		 * and if the state changed, there is likely another scheduled
		 * work which will send a uevent.
		 */
		dev_dbg(dev, "did not send uevent\n");
	}
}
121
state_show(struct device * pdev,struct device_attribute * attr,char * buf)122 static ssize_t state_show(struct device *pdev,
123 struct device_attribute *attr,
124 char *buf)
125 {
126 struct android_uevent_opts *opts = dev_get_drvdata(pdev);
127 char *state = "DISCONNECTED";
128
129 if (opts->configured)
130 state = "CONFIGURED";
131 else if (opts->connected)
132 state = "CONNECTED";
133
134 return sysfs_emit(buf, "%s\n", state);
135 }
static DEVICE_ATTR_RO(state);

/* sysfs attributes attached to every android_usb class device */
static struct attribute *android_usb_attrs[] = {
	&dev_attr_state.attr,
	NULL,
};

ATTRIBUTE_GROUPS(android_usb);

/* The "android_usb" class; dev_groups publishes the state attribute. */
static struct class android_usb_class = {
	.name = "android_usb",
	.dev_groups = android_usb_groups,
};
149
/**
 * android_class_create - register the "android_usb" device class
 *
 * Return: 0 on success or a negative errno from class_register().
 */
int android_class_create(void)
{
	return class_register(&android_usb_class);
}
154
/**
 * android_class_destroy - unregister the "android_usb" device class
 */
void android_class_destroy(void)
{
	class_unregister(&android_usb_class);
}
159
android_device_create(struct android_uevent_opts * opts)160 int android_device_create(struct android_uevent_opts *opts)
161 {
162 unsigned long flags;
163 struct device *dev;
164
165 spin_lock_irqsave(&opts_lock, flags);
166 INIT_WORK(&opts->work, android_work);
167
168 opts->device_id = ida_alloc(&android_ida, GFP_ATOMIC);
169 //Unlock prior to calling device_create() since it may sleep
170 spin_unlock_irqrestore(&opts_lock, flags);
171 if (opts->device_id < 0)
172 return opts->device_id;
173
174 dev = device_create(&android_usb_class, NULL, MKDEV(0, 0),
175 opts, "android%d", opts->device_id);
176
177 spin_lock_irqsave(&opts_lock, flags);
178 if (IS_ERR(dev)) {
179 ida_free(&android_ida, opts->device_id);
180 opts->device_id = -1;
181 spin_unlock_irqrestore(&opts_lock, flags);
182 return PTR_ERR(dev);
183 }
184 opts->dev = dev;
185 ida_init(&opts->function_ida);
186 if (!android_opts)
187 android_opts = opts;
188 spin_unlock_irqrestore(&opts_lock, flags);
189
190 return 0;
191 }
192
android_device_destroy(struct android_uevent_opts * opts)193 void android_device_destroy(struct android_uevent_opts *opts)
194 {
195 unsigned long flags;
196 struct device *dev;
197
198 /*
199 * This scheme is used to safely cleanup any remaining work. Once
200 * opts->dev is set to NULL, any newly scheduled work will return
201 * after getting the lock and checking for NULL. Any currently
202 * running work finishes with the flush_work (the worker caches
203 * opts->dev so it can continue), before we free the device.
204 *
205 * Ideally, this cleanup would be handled via reference counting, but
206 * there are nuances around device destroy (or the fact that we are
207 * currently statically allocating opts) which prevent this from
208 * being implemented without a significant refactor.
209 */
210 spin_lock_irqsave(&opts_lock, flags);
211 dev = opts->dev;
212 opts->dev = NULL;
213 spin_unlock_irqrestore(&opts_lock, flags);
214
215 flush_work(&opts->work);
216
217 spin_lock_irqsave(&opts_lock, flags);
218 if (opts->device_id >= 0)
219 ida_free(&android_ida, opts->device_id);
220
221 android_opts = NULL;
222 ida_destroy(&opts->function_ida);
223 device_destroy(dev->class, dev->devt);
224 spin_unlock_irqrestore(&opts_lock, flags);
225 }
226
/**
 * __android_set_connected - update the connected flag and notify userspace
 * @opts: uevent options to update
 * @connected: new connection state
 *
 * Schedules the uevent worker only when the state actually changes.
 */
void __android_set_connected(struct android_uevent_opts *opts,
		bool connected)
{
	unsigned long flags;

	spin_lock_irqsave(&opts_lock, flags);
	/* No transition, no uevent. */
	if (opts->connected == connected)
		goto out;

	opts->connected = connected;
	schedule_work(&opts->work);
out:
	spin_unlock_irqrestore(&opts_lock, flags);
}
240
/**
 * __android_set_configured - update the configured flag and notify userspace
 * @opts: uevent options to update
 * @configured: new configured state
 *
 * Schedules the uevent worker only when the state actually changes.
 */
void __android_set_configured(struct android_uevent_opts *opts,
		bool configured)
{
	unsigned long flags;

	spin_lock_irqsave(&opts_lock, flags);
	/* No transition, no uevent. */
	if (opts->configured == configured)
		goto out;

	opts->configured = configured;
	schedule_work(&opts->work);
out:
	spin_unlock_irqrestore(&opts_lock, flags);
}
254
/** android_set_connected - mark the gadget as connected (USB_STATE=CONNECTED) */
void android_set_connected(struct android_uevent_opts *opts)
{
	__android_set_connected(opts, true);
}
259
/** android_set_disconnected - mark the gadget as disconnected (USB_STATE=DISCONNECTED) */
void android_set_disconnected(struct android_uevent_opts *opts)
{
	__android_set_connected(opts, false);
}
264
/** android_set_configured - mark the gadget as configured (USB_STATE=CONFIGURED) */
void android_set_configured(struct android_uevent_opts *opts)
{
	__android_set_configured(opts, true);
}
269
/** android_set_unconfigured - clear the configured state */
void android_set_unconfigured(struct android_uevent_opts *opts)
{
	__android_set_configured(opts, false);
}
274
android_create_function_device(char * name,void * drvdata,const struct attribute_group ** groups)275 struct device *android_create_function_device(char *name, void *drvdata,
276 const struct attribute_group **groups)
277 {
278 struct android_uevent_opts *opts;
279 struct device *dev;
280 unsigned long flags;
281 int id;
282
283 spin_lock_irqsave(&opts_lock, flags);
284 opts = android_opts;
285 if (IS_ERR_OR_NULL(opts) || IS_ERR_OR_NULL(opts->dev)) {
286 spin_unlock_irqrestore(&opts_lock, flags);
287 return ERR_PTR(-ENODEV);
288 }
289
290 id = ida_alloc(&opts->function_ida, GFP_ATOMIC);
291 if (id < 0) {
292 spin_unlock_irqrestore(&opts_lock, flags);
293 return ERR_PTR(id);
294 }
295 // device_create_with_groups can sleep, so we must unlock first
296 spin_unlock_irqrestore(&opts_lock, flags);
297 dev = device_create_with_groups(&android_usb_class, opts->dev,
298 MKDEV(0, id), drvdata, groups, name);
299 return dev;
300 }
301 EXPORT_SYMBOL_GPL(android_create_function_device);
302
android_remove_function_device(struct device * dev)303 void android_remove_function_device(struct device *dev)
304 {
305 struct android_uevent_opts *opts;
306 unsigned long flags;
307
308 device_destroy(&android_usb_class, dev->devt);
309
310 spin_lock_irqsave(&opts_lock, flags);
311 opts = android_opts;
312 if (IS_ERR_OR_NULL(opts)) {
313 spin_unlock_irqrestore(&opts_lock, flags);
314 return;
315 }
316
317 ida_free(&opts->function_ida, MINOR(dev->devt));
318 spin_unlock_irqrestore(&opts_lock, flags);
319 }
320 EXPORT_SYMBOL_GPL(android_remove_function_device);
321