1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This module provides an interface to trigger and test firmware loading.
4  *
5  * It is designed to be used for basic evaluation of the firmware loading
6  * subsystem (for example when validating firmware verification). It lacks
7  * any extra dependencies, and will not normally be loaded by the system
8  * unless explicitly requested by name.
9  */
10 
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/printk.h>
16 #include <linux/completion.h>
17 #include <linux/firmware.h>
18 #include <linux/device.h>
19 #include <linux/fs.h>
20 #include <linux/miscdevice.h>
21 #include <linux/sizes.h>
22 #include <linux/slab.h>
23 #include <linux/uaccess.h>
24 #include <linux/delay.h>
25 #include <linux/kthread.h>
26 #include <linux/vmalloc.h>
27 
28 #define TEST_FIRMWARE_NAME	"test-firmware.bin"
29 #define TEST_FIRMWARE_NUM_REQS	4
30 #define TEST_FIRMWARE_BUF_SIZE	SZ_1K
31 
32 static DEFINE_MUTEX(test_fw_mutex);
33 static const struct firmware *test_firmware;
34 
35 struct test_batched_req {
36 	u8 idx;
37 	int rc;
38 	bool sent;
39 	const struct firmware *fw;
40 	const char *name;
41 	const char *fw_buf;
42 	struct completion completion;
43 	struct task_struct *task;
44 	struct device *dev;
45 };
46 
47 /**
48  * test_config - represents configuration for the test for different triggers
49  *
50  * @name: the name of the firmware file to look for
51  * @into_buf: if true, the sync trigger uses request_firmware_into_buf()
52  *	instead of request_firmware().
53  * @sync_direct: if true, the sync trigger uses request_firmware_direct()
54  *	instead of request_firmware().
55  * @send_uevent: whether or not to send a uevent for async requests
56  * @num_requests: number of requests to try per test case. This is trigger
57  *	specific.
58  * @reqs: stores all requests information
59  * @read_fw_idx: index of the batched request whose firmware results are
60  *	read back through the read_firmware trigger.
61  * @test_result: a test may use this to collect the result from the
62  *	request_firmware*() calls used in its tests. In order of priority we
63  *	always keep any setup error first. If no setup errors were found we
64  *	then move on to the first error encountered while running the API.
65  *	Note that for async calls this will typically be a successful result
66  *	(0) unless of course bogus parameters were used, or the system is out
67  *	of memory. In the async case the callback is expected to do a bit
68  *	more homework to figure out what happened; unfortunately the only
69  *	information passed today on error is the fact that no firmware was
70  *	found, so we can only assume -ENOENT on async calls if the firmware
71  *	is NULL.
72  *
73  *	Errors you can expect:
74  *
75  *	API specific:
76  *
77  *	0:		success for sync, for async it means request was sent
78  *	-EINVAL:	invalid parameters or request
79  *	-ENOENT:	firmware file not found
80  *
81  *	System environment:
82  *
83  *	-ENOMEM:	memory pressure on system
84  *	-ENODEV:	no more devices left to test
85  *	-EINVAL:	an unexpected error has occurred
86  * @req_firmware: if @sync_direct is true this is set to
87  *	request_firmware_direct(), otherwise request_firmware()
88  */
89 struct test_config {
90 	char *name;
91 	bool into_buf;
92 	bool sync_direct;
93 	bool send_uevent;
94 	u8 num_requests;
95 	u8 read_fw_idx;
96 
97 	/*
98 	 * These below don't belong here, but we'll move them once we create
99 	 * a struct fw_test_device and stuff the misc_dev under it later.
100 	 */
101 	struct test_batched_req *reqs;
102 	int test_result;
103 	int (*req_firmware)(const struct firmware **fw, const char *name,
104 			    struct device *device);
105 };
106 
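As a quick illustration of how the configuration and triggers documented above are meant to be driven, here is a minimal userspace sketch (not part of this module). The sysfs directory /sys/devices/virtual/misc/test_firmware, the /dev/test_firmware node, and the presence of a blob named test-firmware.bin in the firmware search path are assumptions of the sketch; adjust them for your setup.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Assumed location of the test_firmware sysfs attributes. */
#define TEST_FW_SYSFS "/sys/devices/virtual/misc/test_firmware"

/* Write a value into one of the test_firmware sysfs attributes. */
static int write_attr(const char *attr, const char *val)
{
	char path[256];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), TEST_FW_SYSFS "/%s", attr);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	char data[4096];
	ssize_t n;
	int fd;

	/* Ask the module to load the blob synchronously via request_firmware(). */
	if (write_attr("trigger_request", "test-firmware.bin"))
		return 1;

	/* The cached firmware can then be read back through the misc device. */
	fd = open("/dev/test_firmware", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, data, sizeof(data));
	close(fd);
	printf("read %zd bytes of firmware\n", n);
	return 0;
}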
107 static struct test_config *test_fw_config;
108 
109 static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
110 				 size_t size, loff_t *offset)
111 {
112 	ssize_t rc = 0;
113 
114 	mutex_lock(&test_fw_mutex);
115 	if (test_firmware)
116 		rc = simple_read_from_buffer(buf, size, offset,
117 					     test_firmware->data,
118 					     test_firmware->size);
119 	mutex_unlock(&test_fw_mutex);
120 	return rc;
121 }
122 
123 static const struct file_operations test_fw_fops = {
124 	.owner          = THIS_MODULE,
125 	.read           = test_fw_misc_read,
126 };
127 
128 static void __test_release_all_firmware(void)
129 {
130 	struct test_batched_req *req;
131 	u8 i;
132 
133 	if (!test_fw_config->reqs)
134 		return;
135 
136 	for (i = 0; i < test_fw_config->num_requests; i++) {
137 		req = &test_fw_config->reqs[i];
138 		if (req->fw) {
139 			if (req->fw_buf) {
140 				kfree_const(req->fw_buf);
141 				req->fw_buf = NULL;
142 			}
143 			release_firmware(req->fw);
144 			req->fw = NULL;
145 		}
146 	}
147 
148 	vfree(test_fw_config->reqs);
149 	test_fw_config->reqs = NULL;
150 }
151 
152 static void test_release_all_firmware(void)
153 {
154 	mutex_lock(&test_fw_mutex);
155 	__test_release_all_firmware();
156 	mutex_unlock(&test_fw_mutex);
157 }
158 
159 
160 static void __test_firmware_config_free(void)
161 {
162 	__test_release_all_firmware();
163 	kfree_const(test_fw_config->name);
164 	test_fw_config->name = NULL;
165 }
166 
167 /*
168  * XXX: move to kstrncpy() once merged.
169  *
170  * Users should use kfree_const() when freeing these.
171  */
172 static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
173 {
174 	*dst = kstrndup(name, count, gfp);
175 	if (!*dst)
176 		return -ENOMEM;
177 	return count;
178 }
179 
180 static int __test_firmware_config_init(void)
181 {
182 	int ret;
183 
184 	ret = __kstrncpy(&test_fw_config->name, TEST_FIRMWARE_NAME,
185 			 strlen(TEST_FIRMWARE_NAME), GFP_KERNEL);
186 	if (ret < 0)
187 		goto out;
188 
189 	test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
190 	test_fw_config->send_uevent = true;
191 	test_fw_config->into_buf = false;
192 	test_fw_config->sync_direct = false;
193 	test_fw_config->req_firmware = request_firmware;
194 	test_fw_config->test_result = 0;
195 	test_fw_config->reqs = NULL;
196 
197 	return 0;
198 
199 out:
200 	__test_firmware_config_free();
201 	return ret;
202 }
203 
204 static ssize_t reset_store(struct device *dev,
205 			   struct device_attribute *attr,
206 			   const char *buf, size_t count)
207 {
208 	int ret;
209 
210 	mutex_lock(&test_fw_mutex);
211 
212 	__test_firmware_config_free();
213 
214 	ret = __test_firmware_config_init();
215 	if (ret < 0) {
216 		ret = -ENOMEM;
217 		pr_err("could not alloc settings for config trigger: %d\n",
218 		       ret);
219 		goto out;
220 	}
221 
222 	pr_info("reset\n");
223 	ret = count;
224 
225 out:
226 	mutex_unlock(&test_fw_mutex);
227 
228 	return ret;
229 }
230 static DEVICE_ATTR_WO(reset);
231 
232 static ssize_t config_show(struct device *dev,
233 			   struct device_attribute *attr,
234 			   char *buf)
235 {
236 	int len = 0;
237 
238 	mutex_lock(&test_fw_mutex);
239 
240 	len += scnprintf(buf, PAGE_SIZE - len,
241 			"Custom trigger configuration for: %s\n",
242 			dev_name(dev));
243 
244 	if (test_fw_config->name)
245 		len += scnprintf(buf+len, PAGE_SIZE - len,
246 				"name:\t%s\n",
247 				test_fw_config->name);
248 	else
249 		len += scnprintf(buf+len, PAGE_SIZE - len,
250 				"name:\tEMPTY\n");
251 
252 	len += scnprintf(buf+len, PAGE_SIZE - len,
253 			"num_requests:\t%u\n", test_fw_config->num_requests);
254 
255 	len += scnprintf(buf+len, PAGE_SIZE - len,
256 			"send_uevent:\t\t%s\n",
257 			test_fw_config->send_uevent ?
258 			"FW_ACTION_HOTPLUG" :
259 			"FW_ACTION_NOHOTPLUG");
260 	len += scnprintf(buf+len, PAGE_SIZE - len,
261 			"into_buf:\t\t%s\n",
262 			test_fw_config->into_buf ? "true" : "false");
263 	len += scnprintf(buf+len, PAGE_SIZE - len,
264 			"sync_direct:\t\t%s\n",
265 			test_fw_config->sync_direct ? "true" : "false");
266 	len += scnprintf(buf+len, PAGE_SIZE - len,
267 			"read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
268 
269 	mutex_unlock(&test_fw_mutex);
270 
271 	return len;
272 }
273 static DEVICE_ATTR_RO(config);
274 
275 static ssize_t config_name_store(struct device *dev,
276 				 struct device_attribute *attr,
277 				 const char *buf, size_t count)
278 {
279 	int ret;
280 
281 	mutex_lock(&test_fw_mutex);
282 	kfree_const(test_fw_config->name);
283 	ret = __kstrncpy(&test_fw_config->name, buf, count, GFP_KERNEL);
284 	mutex_unlock(&test_fw_mutex);
285 
286 	return ret;
287 }
288 
289 /*
290  * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
291  */
292 static ssize_t config_test_show_str(char *dst,
293 				    char *src)
294 {
295 	int len;
296 
297 	mutex_lock(&test_fw_mutex);
298 	len = snprintf(dst, PAGE_SIZE, "%s\n", src);
299 	mutex_unlock(&test_fw_mutex);
300 
301 	return len;
302 }
303 
304 static inline int __test_dev_config_update_bool(const char *buf, size_t size,
305 						bool *cfg)
306 {
307 	int ret;
308 
309 	if (strtobool(buf, cfg) < 0)
310 		ret = -EINVAL;
311 	else
312 		ret = size;
313 
314 	return ret;
315 }
316 
317 static int test_dev_config_update_bool(const char *buf, size_t size,
318 				       bool *cfg)
319 {
320 	int ret;
321 
322 	mutex_lock(&test_fw_mutex);
323 	ret = __test_dev_config_update_bool(buf, size, cfg);
324 	mutex_unlock(&test_fw_mutex);
325 
326 	return ret;
327 }
328 
329 static ssize_t
330 test_dev_config_show_bool(char *buf,
331 			  bool config)
332 {
333 	bool val;
334 
335 	mutex_lock(&test_fw_mutex);
336 	val = config;
337 	mutex_unlock(&test_fw_mutex);
338 
339 	return snprintf(buf, PAGE_SIZE, "%d\n", val);
340 }
341 
342 static ssize_t test_dev_config_show_int(char *buf, int cfg)
343 {
344 	int val;
345 
346 	mutex_lock(&test_fw_mutex);
347 	val = cfg;
348 	mutex_unlock(&test_fw_mutex);
349 
350 	return snprintf(buf, PAGE_SIZE, "%d\n", val);
351 }
352 
353 static inline int __test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
354 {
355 	int ret;
356 	long new;
357 
358 	ret = kstrtol(buf, 10, &new);
359 	if (ret)
360 		return ret;
361 
362 	if (new > U8_MAX)
363 		return -EINVAL;
364 
365 	*(u8 *)cfg = new;
366 
367 	/* Always return full write size even if we didn't consume all */
368 	return size;
369 }
370 
371 static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
372 {
373 	int ret;
374 
375 	mutex_lock(&test_fw_mutex);
376 	ret = __test_dev_config_update_u8(buf, size, cfg);
377 	mutex_unlock(&test_fw_mutex);
378 
379 	return ret;
380 }
381 
382 static ssize_t test_dev_config_show_u8(char *buf, u8 cfg)
383 {
384 	u8 val;
385 
386 	mutex_lock(&test_fw_mutex);
387 	val = cfg;
388 	mutex_unlock(&test_fw_mutex);
389 
390 	return snprintf(buf, PAGE_SIZE, "%u\n", val);
391 }
392 
393 static ssize_t config_name_show(struct device *dev,
394 				struct device_attribute *attr,
395 				char *buf)
396 {
397 	return config_test_show_str(buf, test_fw_config->name);
398 }
399 static DEVICE_ATTR_RW(config_name);
400 
401 static ssize_t config_num_requests_store(struct device *dev,
402 					 struct device_attribute *attr,
403 					 const char *buf, size_t count)
404 {
405 	int rc;
406 
407 	mutex_lock(&test_fw_mutex);
408 	if (test_fw_config->reqs) {
409 		pr_err("Must call release_all_firmware prior to changing config\n");
410 		rc = -EINVAL;
411 		mutex_unlock(&test_fw_mutex);
412 		goto out;
413 	}
414 
415 	rc = __test_dev_config_update_u8(buf, count,
416 					 &test_fw_config->num_requests);
417 	mutex_unlock(&test_fw_mutex);
418 
419 out:
420 	return rc;
421 }
422 
423 static ssize_t config_num_requests_show(struct device *dev,
424 					struct device_attribute *attr,
425 					char *buf)
426 {
427 	return test_dev_config_show_u8(buf, test_fw_config->num_requests);
428 }
429 static DEVICE_ATTR_RW(config_num_requests);
430 
431 static ssize_t config_into_buf_store(struct device *dev,
432 				     struct device_attribute *attr,
433 				     const char *buf, size_t count)
434 {
435 	return test_dev_config_update_bool(buf,
436 					   count,
437 					   &test_fw_config->into_buf);
438 }
439 
440 static ssize_t config_into_buf_show(struct device *dev,
441 				    struct device_attribute *attr,
442 				    char *buf)
443 {
444 	return test_dev_config_show_bool(buf, test_fw_config->into_buf);
445 }
446 static DEVICE_ATTR_RW(config_into_buf);
447 
448 static ssize_t config_sync_direct_store(struct device *dev,
449 					struct device_attribute *attr,
450 					const char *buf, size_t count)
451 {
452 	int rc = test_dev_config_update_bool(buf, count,
453 					     &test_fw_config->sync_direct);
454 
455 	if (rc == count)
456 		test_fw_config->req_firmware = test_fw_config->sync_direct ?
457 				       request_firmware_direct :
458 				       request_firmware;
459 	return rc;
460 }
461 
462 static ssize_t config_sync_direct_show(struct device *dev,
463 				       struct device_attribute *attr,
464 				       char *buf)
465 {
466 	return test_dev_config_show_bool(buf, test_fw_config->sync_direct);
467 }
468 static DEVICE_ATTR_RW(config_sync_direct);
469 
470 static ssize_t config_send_uevent_store(struct device *dev,
471 					struct device_attribute *attr,
472 					const char *buf, size_t count)
473 {
474 	return test_dev_config_update_bool(buf, count,
475 					   &test_fw_config->send_uevent);
476 }
477 
478 static ssize_t config_send_uevent_show(struct device *dev,
479 				       struct device_attribute *attr,
480 				       char *buf)
481 {
482 	return test_dev_config_show_bool(buf, test_fw_config->send_uevent);
483 }
484 static DEVICE_ATTR_RW(config_send_uevent);
485 
486 static ssize_t config_read_fw_idx_store(struct device *dev,
487 					struct device_attribute *attr,
488 					const char *buf, size_t count)
489 {
490 	return test_dev_config_update_u8(buf, count,
491 					 &test_fw_config->read_fw_idx);
492 }
493 
494 static ssize_t config_read_fw_idx_show(struct device *dev,
495 				       struct device_attribute *attr,
496 				       char *buf)
497 {
498 	return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx);
499 }
500 static DEVICE_ATTR_RW(config_read_fw_idx);
501 
502 
503 static ssize_t trigger_request_store(struct device *dev,
504 				     struct device_attribute *attr,
505 				     const char *buf, size_t count)
506 {
507 	int rc;
508 	char *name;
509 
510 	name = kstrndup(buf, count, GFP_KERNEL);
511 	if (!name)
512 		return -ENOMEM;
513 
514 	pr_info("loading '%s'\n", name);
515 
516 	mutex_lock(&test_fw_mutex);
517 	release_firmware(test_firmware);
518 	if (test_fw_config->reqs)
519 		__test_release_all_firmware();
520 	test_firmware = NULL;
521 	rc = request_firmware(&test_firmware, name, dev);
522 	if (rc) {
523 		pr_info("load of '%s' failed: %d\n", name, rc);
524 		goto out;
525 	}
526 	pr_info("loaded: %zu\n", test_firmware->size);
527 	rc = count;
528 
529 out:
530 	mutex_unlock(&test_fw_mutex);
531 
532 	kfree(name);
533 
534 	return rc;
535 }
536 static DEVICE_ATTR_WO(trigger_request);
537 
538 static DECLARE_COMPLETION(async_fw_done);
539 
540 static void trigger_async_request_cb(const struct firmware *fw, void *context)
541 {
542 	test_firmware = fw;
543 	complete(&async_fw_done);
544 }
545 
546 static ssize_t trigger_async_request_store(struct device *dev,
547 					   struct device_attribute *attr,
548 					   const char *buf, size_t count)
549 {
550 	int rc;
551 	char *name;
552 
553 	name = kstrndup(buf, count, GFP_KERNEL);
554 	if (!name)
555 		return -ENOMEM;
556 
557 	pr_info("loading '%s'\n", name);
558 
559 	mutex_lock(&test_fw_mutex);
560 	release_firmware(test_firmware);
561 	test_firmware = NULL;
562 	if (test_fw_config->reqs)
563 		__test_release_all_firmware();
564 	rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL,
565 				     NULL, trigger_async_request_cb);
566 	if (rc) {
567 		pr_info("async load of '%s' failed: %d\n", name, rc);
568 		kfree(name);
569 		goto out;
570 	}
571 	/* Free 'name' ASAP, to test for race conditions */
572 	kfree(name);
573 
574 	wait_for_completion(&async_fw_done);
575 
576 	if (test_firmware) {
577 		pr_info("loaded: %zu\n", test_firmware->size);
578 		rc = count;
579 	} else {
580 		pr_err("failed to async load firmware\n");
581 		rc = -ENOMEM;
582 	}
583 
584 out:
585 	mutex_unlock(&test_fw_mutex);
586 
587 	return rc;
588 }
589 static DEVICE_ATTR_WO(trigger_async_request);
590 
591 static ssize_t trigger_custom_fallback_store(struct device *dev,
592 					     struct device_attribute *attr,
593 					     const char *buf, size_t count)
594 {
595 	int rc;
596 	char *name;
597 
598 	name = kstrndup(buf, count, GFP_KERNEL);
599 	if (!name)
600 		return -ENOMEM;
601 
602 	pr_info("loading '%s' using custom fallback mechanism\n", name);
603 
604 	mutex_lock(&test_fw_mutex);
605 	release_firmware(test_firmware);
606 	if (test_fw_config->reqs)
607 		__test_release_all_firmware();
608 	test_firmware = NULL;
609 	rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
610 				     dev, GFP_KERNEL, NULL,
611 				     trigger_async_request_cb);
612 	if (rc) {
613 		pr_info("async load of '%s' failed: %d\n", name, rc);
614 		kfree(name);
615 		goto out;
616 	}
617 	/* Free 'name' ASAP, to test for race conditions */
618 	kfree(name);
619 
620 	wait_for_completion(&async_fw_done);
621 
622 	if (test_firmware) {
623 		pr_info("loaded: %zu\n", test_firmware->size);
624 		rc = count;
625 	} else {
626 		pr_err("failed to async load firmware\n");
627 		rc = -ENODEV;
628 	}
629 
630 out:
631 	mutex_unlock(&test_fw_mutex);
632 
633 	return rc;
634 }
635 static DEVICE_ATTR_WO(trigger_custom_fallback);
636 
637 static int test_fw_run_batch_request(void *data)
638 {
639 	struct test_batched_req *req = data;
640 
641 	if (!req) {
642 		test_fw_config->test_result = -EINVAL;
643 		return -EINVAL;
644 	}
645 
646 	if (test_fw_config->into_buf) {
647 		void *test_buf;
648 
649 		test_buf = kzalloc(TEST_FIRMWARE_BUF_SIZE, GFP_KERNEL);
650 		if (!test_buf)
651 			return -ENOMEM;
652 
653 		req->rc = request_firmware_into_buf(&req->fw,
654 						    req->name,
655 						    req->dev,
656 						    test_buf,
657 						    TEST_FIRMWARE_BUF_SIZE);
658 		if (!req->fw)
659 			kfree(test_buf);
660 		else
661 			req->fw_buf = test_buf;
662 	} else {
663 		req->rc = test_fw_config->req_firmware(&req->fw,
664 						       req->name,
665 						       req->dev);
666 	}
667 
668 	if (req->rc) {
669 		pr_info("#%u: batched sync load failed: %d\n",
670 			req->idx, req->rc);
671 		if (!test_fw_config->test_result)
672 			test_fw_config->test_result = req->rc;
673 	} else if (req->fw) {
674 		req->sent = true;
675 		pr_info("#%u: batched sync loaded %zu\n",
676 			req->idx, req->fw->size);
677 	}
678 	complete(&req->completion);
679 
680 	req->task = NULL;
681 
682 	return 0;
683 }
684 
685 /*
686  * We use a kthread as otherwise the kernel serializes all our sync requests
687  * and we would not be able to mimic batched requests on a sync call. Batched
688  * requests on a sync call can for instance happen on a device driver when
689  * requests on a sync call can happen, for instance, in a device driver when
690  */
691 static ssize_t trigger_batched_requests_store(struct device *dev,
692 					      struct device_attribute *attr,
693 					      const char *buf, size_t count)
694 {
695 	struct test_batched_req *req;
696 	int rc;
697 	u8 i;
698 
699 	mutex_lock(&test_fw_mutex);
700 
701 	if (test_fw_config->reqs) {
702 		rc = -EBUSY;
703 		goto out_bail;
704 	}
705 
706 	test_fw_config->reqs =
707 		vzalloc(array3_size(sizeof(struct test_batched_req),
708 				    test_fw_config->num_requests, 2));
709 	if (!test_fw_config->reqs) {
710 		rc = -ENOMEM;
711 		goto out_unlock;
712 	}
713 
714 	pr_info("batched sync firmware loading '%s' %u times\n",
715 		test_fw_config->name, test_fw_config->num_requests);
716 
717 	for (i = 0; i < test_fw_config->num_requests; i++) {
718 		req = &test_fw_config->reqs[i];
719 		req->fw = NULL;
720 		req->idx = i;
721 		req->name = test_fw_config->name;
722 		req->fw_buf = NULL;
723 		req->dev = dev;
724 		init_completion(&req->completion);
725 		req->task = kthread_run(test_fw_run_batch_request, req,
726 					     "%s-%u", KBUILD_MODNAME, req->idx);
727 		if (!req->task || IS_ERR(req->task)) {
728 			pr_err("Setting up thread %u failed\n", req->idx);
729 			req->task = NULL;
730 			rc = -ENOMEM;
731 			goto out_bail;
732 		}
733 	}
734 
735 	rc = count;
736 
737 	/*
738 	 * We require an explicit release to allow more time before
739 	 * release_firmware() gets called, which improves our chances of
740 	 * forcing a batched request. If we instead called release_firmware()
741 	 * right away, a successful firmware request might miss the
742 	 * opportunity to become a batched request.
743 	 */
744 
745 out_bail:
746 	for (i = 0; i < test_fw_config->num_requests; i++) {
747 		req = &test_fw_config->reqs[i];
748 		if (req->task || req->sent)
749 			wait_for_completion(&req->completion);
750 	}
751 
752 	/* Override any worker error if we had a general setup error */
753 	if (rc < 0)
754 		test_fw_config->test_result = rc;
755 
756 out_unlock:
757 	mutex_unlock(&test_fw_mutex);
758 
759 	return rc;
760 }
761 static DEVICE_ATTR_WO(trigger_batched_requests);
762 
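For the batched sync path just defined, the flow from userspace looks roughly like the sketch below. It is illustrative only, not part of the module; the sysfs path and the firmware name test-firmware.bin are assumptions. The idea is: configure the name and request count, fire the trigger, check test_result, then explicitly release the requests so the interface can be reconfigured.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Assumed location of the test_firmware sysfs attributes. */
#define TEST_FW_SYSFS "/sys/devices/virtual/misc/test_firmware"

/* Write a string into a test_firmware sysfs attribute. */
static int put_attr(const char *attr, const char *val)
{
	char path[256];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), TEST_FW_SYSFS "/%s", attr);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

/* Read a string back from a test_firmware sysfs attribute. */
static ssize_t get_attr(const char *attr, char *buf, size_t len)
{
	char path[256];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), TEST_FW_SYSFS "/%s", attr);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	n = read(fd, buf, len - 1);
	close(fd);
	if (n >= 0)
		buf[n] = '\0';
	return n;
}

int main(void)
{
	char result[32];

	/* Configure and fire a batch of parallel sync loads. */
	put_attr("config_name", "test-firmware.bin");
	put_attr("config_num_requests", "4");
	put_attr("trigger_batched_requests", "1");

	/* 0 here means every request_firmware() call in the batch succeeded. */
	if (get_attr("test_result", result, sizeof(result)) > 0)
		printf("test_result: %s", result);

	/* Requests stay pinned until an explicit release; reset restores defaults. */
	put_attr("release_all_firmware", "1");
	put_attr("reset", "1");
	return 0;
}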
763 /*
764  * We wait for each callback to return with the lock held, no need to lock here
765  */
766 static void trigger_batched_cb(const struct firmware *fw, void *context)
767 {
768 	struct test_batched_req *req = context;
769 
770 	if (!req) {
771 		test_fw_config->test_result = -EINVAL;
772 		return;
773 	}
774 
775 	/* forces *some* batched requests to queue up */
776 	if (!req->idx)
777 		ssleep(2);
778 
779 	req->fw = fw;
780 
781 	/*
782 	 * Unfortunately the firmware API gives us nothing other than a null FW
783 	 * if the firmware was not found on async requests.  Best we can do is
784 	 * just assume -ENOENT. A better API would pass the actual return
785 	 * value to the callback.
786 	 */
787 	if (!fw && !test_fw_config->test_result)
788 		test_fw_config->test_result = -ENOENT;
789 
790 	complete(&req->completion);
791 }
792 
793 static
794 ssize_t trigger_batched_requests_async_store(struct device *dev,
795 					     struct device_attribute *attr,
796 					     const char *buf, size_t count)
797 {
798 	struct test_batched_req *req;
799 	bool send_uevent;
800 	int rc;
801 	u8 i;
802 
803 	mutex_lock(&test_fw_mutex);
804 
805 	if (test_fw_config->reqs) {
806 		rc = -EBUSY;
807 		goto out_bail;
808 	}
809 
810 	test_fw_config->reqs =
811 		vzalloc(array3_size(sizeof(struct test_batched_req),
812 				    test_fw_config->num_requests, 2));
813 	if (!test_fw_config->reqs) {
814 		rc = -ENOMEM;
815 		goto out;
816 	}
817 
818 	pr_info("batched loading '%s' custom fallback mechanism %u times\n",
819 		test_fw_config->name, test_fw_config->num_requests);
820 
821 	send_uevent = test_fw_config->send_uevent ? FW_ACTION_HOTPLUG :
822 		FW_ACTION_NOHOTPLUG;
823 
824 	for (i = 0; i < test_fw_config->num_requests; i++) {
825 		req = &test_fw_config->reqs[i];
826 		req->name = test_fw_config->name;
827 		req->fw_buf = NULL;
828 		req->fw = NULL;
829 		req->idx = i;
830 		init_completion(&req->completion);
831 		rc = request_firmware_nowait(THIS_MODULE, send_uevent,
832 					     req->name,
833 					     dev, GFP_KERNEL, req,
834 					     trigger_batched_cb);
835 		if (rc) {
836 			pr_info("#%u: batched async load failed setup: %d\n",
837 				i, rc);
838 			req->rc = rc;
839 			goto out_bail;
840 		} else
841 			req->sent = true;
842 	}
843 
844 	rc = count;
845 
846 out_bail:
847 
848 	/*
849 	 * We require an explicit release to allow more time before
850 	 * release_firmware() gets called, which improves our chances of
851 	 * forcing a batched request. If we instead called release_firmware()
852 	 * right away, a successful firmware request might miss the
853 	 * opportunity to become a batched request.
854 	 */
855 
856 	for (i = 0; i < test_fw_config->num_requests; i++) {
857 		req = &test_fw_config->reqs[i];
858 		if (req->sent)
859 			wait_for_completion(&req->completion);
860 	}
861 
862 	/* Override any worker error if we had a general setup error */
863 	if (rc < 0)
864 		test_fw_config->test_result = rc;
865 
866 out:
867 	mutex_unlock(&test_fw_mutex);
868 
869 	return rc;
870 }
871 static DEVICE_ATTR_WO(trigger_batched_requests_async);
872 
873 static ssize_t test_result_show(struct device *dev,
874 				struct device_attribute *attr,
875 				char *buf)
876 {
877 	return test_dev_config_show_int(buf, test_fw_config->test_result);
878 }
879 static DEVICE_ATTR_RO(test_result);
880 
881 static ssize_t release_all_firmware_store(struct device *dev,
882 					  struct device_attribute *attr,
883 					  const char *buf, size_t count)
884 {
885 	test_release_all_firmware();
886 	return count;
887 }
888 static DEVICE_ATTR_WO(release_all_firmware);
889 
890 static ssize_t read_firmware_show(struct device *dev,
891 				  struct device_attribute *attr,
892 				  char *buf)
893 {
894 	struct test_batched_req *req;
895 	u8 idx;
896 	ssize_t rc = 0;
897 
898 	mutex_lock(&test_fw_mutex);
899 
900 	idx = test_fw_config->read_fw_idx;
901 	if (idx >= test_fw_config->num_requests) {
902 		rc = -ERANGE;
903 		goto out;
904 	}
905 
906 	if (!test_fw_config->reqs) {
907 		rc = -EINVAL;
908 		goto out;
909 	}
910 
911 	req = &test_fw_config->reqs[idx];
912 	if (!req->fw) {
913 		pr_err("#%u: failed to async load firmware\n", idx);
914 		rc = -ENOENT;
915 		goto out;
916 	}
917 
918 	pr_info("#%u: loaded %zu\n", idx, req->fw->size);
919 
920 	if (req->fw->size > PAGE_SIZE) {
921 		pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
922 		rc = -EINVAL;
923 		goto out;
924 	}
925 	memcpy(buf, req->fw->data, req->fw->size);
926 
927 	rc = req->fw->size;
928 out:
929 	mutex_unlock(&test_fw_mutex);
930 
931 	return rc;
932 }
933 static DEVICE_ATTR_RO(read_firmware);
934 
935 #define TEST_FW_DEV_ATTR(name)          &dev_attr_##name.attr
936 
937 static struct attribute *test_dev_attrs[] = {
938 	TEST_FW_DEV_ATTR(reset),
939 
940 	TEST_FW_DEV_ATTR(config),
941 	TEST_FW_DEV_ATTR(config_name),
942 	TEST_FW_DEV_ATTR(config_num_requests),
943 	TEST_FW_DEV_ATTR(config_into_buf),
944 	TEST_FW_DEV_ATTR(config_sync_direct),
945 	TEST_FW_DEV_ATTR(config_send_uevent),
946 	TEST_FW_DEV_ATTR(config_read_fw_idx),
947 
948 	/* These don't use the config at all - they could be ported! */
949 	TEST_FW_DEV_ATTR(trigger_request),
950 	TEST_FW_DEV_ATTR(trigger_async_request),
951 	TEST_FW_DEV_ATTR(trigger_custom_fallback),
952 
953 	/* These use the config and can use the test_result */
954 	TEST_FW_DEV_ATTR(trigger_batched_requests),
955 	TEST_FW_DEV_ATTR(trigger_batched_requests_async),
956 
957 	TEST_FW_DEV_ATTR(release_all_firmware),
958 	TEST_FW_DEV_ATTR(test_result),
959 	TEST_FW_DEV_ATTR(read_firmware),
960 	NULL,
961 };
962 
963 ATTRIBUTE_GROUPS(test_dev);
964 
965 static struct miscdevice test_fw_misc_device = {
966 	.minor          = MISC_DYNAMIC_MINOR,
967 	.name           = "test_firmware",
968 	.fops           = &test_fw_fops,
969 	.groups 	= test_dev_groups,
970 };
971 
972 static int __init test_firmware_init(void)
973 {
974 	int rc;
975 
976 	test_fw_config = kzalloc(sizeof(struct test_config), GFP_KERNEL);
977 	if (!test_fw_config)
978 		return -ENOMEM;
979 
980 	rc = __test_firmware_config_init();
981 	if (rc) {
982 		kfree(test_fw_config);
983 		pr_err("could not init firmware test config: %d\n", rc);
984 		return rc;
985 	}
986 
987 	rc = misc_register(&test_fw_misc_device);
988 	if (rc) {
989 		__test_firmware_config_free();
990 		kfree(test_fw_config);
991 		pr_err("could not register misc device: %d\n", rc);
992 		return rc;
993 	}
994 
995 	pr_warn("interface ready\n");
996 
997 	return 0;
998 }
999 
1000 module_init(test_firmware_init);
1001 
1002 static void __exit test_firmware_exit(void)
1003 {
1004 	mutex_lock(&test_fw_mutex);
1005 	release_firmware(test_firmware);
1006 	misc_deregister(&test_fw_misc_device);
1007 	__test_firmware_config_free();
1008 	kfree(test_fw_config);
1009 	mutex_unlock(&test_fw_mutex);
1010 
1011 	pr_warn("removed interface\n");
1012 }
1013 
1014 module_exit(test_firmware_exit);
1015 
1016 MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
1017 MODULE_LICENSE("GPL");
1018