• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * main.c - Multi purpose firmware loading support
4  *
5  * Copyright (c) 2003 Manuel Estrada Sainz
6  *
7  * Please see Documentation/driver-api/firmware/ for more information.
8  *
9  */
10 
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 
13 #include <linux/capability.h>
14 #include <linux/device.h>
15 #include <linux/kernel_read_file.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/initrd.h>
19 #include <linux/timer.h>
20 #include <linux/vmalloc.h>
21 #include <linux/interrupt.h>
22 #include <linux/bitops.h>
23 #include <linux/mutex.h>
24 #include <linux/workqueue.h>
25 #include <linux/highmem.h>
26 #include <linux/firmware.h>
27 #include <linux/slab.h>
28 #include <linux/sched.h>
29 #include <linux/file.h>
30 #include <linux/list.h>
31 #include <linux/fs.h>
32 #include <linux/async.h>
33 #include <linux/pm.h>
34 #include <linux/suspend.h>
35 #include <linux/syscore_ops.h>
36 #include <linux/reboot.h>
37 #include <linux/security.h>
38 #include <linux/xz.h>
39 
40 #include <generated/utsrelease.h>
41 
42 #include "../base.h"
43 #include "firmware.h"
44 #include "fallback.h"
45 
46 MODULE_AUTHOR("Manuel Estrada Sainz");
47 MODULE_DESCRIPTION("Multi purpose firmware loading support");
48 MODULE_LICENSE("GPL");
49 
/* Global state for in-flight and cached firmware buffers. */
struct firmware_cache {
	/* firmware_buf instance will be added into the below list */
	spinlock_t lock;
	struct list_head head;
	int state;	/* FW_LOADER_NO_CACHE or FW_LOADER_START_CACHE */

#ifdef CONFIG_FW_CACHE
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;
	struct list_head fw_names;

	/* NOTE(review): presumably uncaches images after resume — confirm */
	struct delayed_work work;

	/* PM notifier used to toggle caching around suspend/resume */
	struct notifier_block   pm_notify;
#endif
};
71 
/*
 * Records the name of one cached firmware image.
 * NOTE(review): presumably linked on fw_cache.fw_names — confirm against
 * the cache/uncache helpers.
 */
struct fw_cache_entry {
	struct list_head list;
	const char *name;
};
76 
/*
 * devres payload recording a firmware name requested by a device.
 * @magic is set to (unsigned long)&fw_cache so that fw_devm_match() can
 * distinguish our entries from other devres resources.
 */
struct fw_name_devm {
	unsigned long magic;
	const char *name;
};
81 
/* Map an embedded kref back to its containing struct fw_priv. */
static inline struct fw_priv *to_fw_priv(struct kref *ref)
{
	return container_of(ref, struct fw_priv, ref);
}
86 
/* values for firmware_cache.state */
#define	FW_LOADER_NO_CACHE	0
#define	FW_LOADER_START_CACHE	1

/* fw_lock could be moved to 'struct fw_sysfs' but since it is just
 * guarding for corner cases a global lock should be OK */
DEFINE_MUTEX(fw_lock);

/* the single global firmware cache instance */
static struct firmware_cache fw_cache;
/* NOTE(review): appears to abort all pending loads when set — confirm setter */
bool fw_load_abort_all;
96 
97 /* Builtin firmware support */
98 
99 #ifdef CONFIG_FW_LOADER
100 
101 extern struct builtin_fw __start_builtin_fw[];
102 extern struct builtin_fw __end_builtin_fw[];
103 
fw_copy_to_prealloc_buf(struct firmware * fw,void * buf,size_t size)104 static bool fw_copy_to_prealloc_buf(struct firmware *fw,
105 				    void *buf, size_t size)
106 {
107 	if (!buf)
108 		return true;
109 	if (size < fw->size)
110 		return false;
111 	memcpy(buf, fw->data, fw->size);
112 	return true;
113 }
114 
fw_get_builtin_firmware(struct firmware * fw,const char * name,void * buf,size_t size)115 static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
116 				    void *buf, size_t size)
117 {
118 	struct builtin_fw *b_fw;
119 
120 	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
121 		if (strcmp(name, b_fw->name) == 0) {
122 			fw->size = b_fw->size;
123 			fw->data = b_fw->data;
124 			return fw_copy_to_prealloc_buf(fw, buf, size);
125 		}
126 	}
127 
128 	return false;
129 }
130 
fw_is_builtin_firmware(const struct firmware * fw)131 static bool fw_is_builtin_firmware(const struct firmware *fw)
132 {
133 	struct builtin_fw *b_fw;
134 
135 	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
136 		if (fw->data == b_fw->data)
137 			return true;
138 
139 	return false;
140 }
141 
142 #else /* Module case - no builtin firmware support */
143 
/* Modular build: builtin firmware is never available. */
static inline bool fw_get_builtin_firmware(struct firmware *fw,
					   const char *name, void *buf,
					   size_t size)
{
	return false;
}
150 
/* Modular build: nothing can be a builtin firmware blob. */
static inline bool fw_is_builtin_firmware(const struct firmware *fw)
{
	return false;
}
155 #endif
156 
/* Initialise the load-state tracking for a freshly allocated fw_priv. */
static void fw_state_init(struct fw_priv *fw_priv)
{
	struct fw_state *fw_st = &fw_priv->fw_st;

	init_completion(&fw_st->completion);
	fw_st->status = FW_STATUS_UNKNOWN;
}
164 
/* Block until the firmware load completes; MAX_SCHEDULE_TIMEOUT = no timeout. */
static inline int fw_state_wait(struct fw_priv *fw_priv)
{
	return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
}
169 
170 static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv);
171 
/*
 * Allocate and initialise a new fw_priv for @fw_name.
 *
 * @dbuf/@size: optional caller-preallocated destination buffer.
 * @offset: start offset within the file; only valid for partial reads.
 *
 * Called with fwc->lock held (see alloc_lookup_fw_priv()), hence the
 * GFP_ATOMIC allocations.  Returns NULL on an invalid flag combination
 * or allocation failure.
 */
static struct fw_priv *__allocate_fw_priv(const char *fw_name,
					  struct firmware_cache *fwc,
					  void *dbuf,
					  size_t size,
					  size_t offset,
					  u32 opt_flags)
{
	struct fw_priv *fw_priv;

	/* For a partial read, the buffer must be preallocated. */
	if ((opt_flags & FW_OPT_PARTIAL) && !dbuf)
		return NULL;

	/* Only partial reads are allowed to use an offset. */
	if (offset != 0 && !(opt_flags & FW_OPT_PARTIAL))
		return NULL;

	fw_priv = kzalloc(sizeof(*fw_priv), GFP_ATOMIC);
	if (!fw_priv)
		return NULL;

	fw_priv->fw_name = kstrdup_const(fw_name, GFP_ATOMIC);
	if (!fw_priv->fw_name) {
		kfree(fw_priv);
		return NULL;
	}

	kref_init(&fw_priv->ref);
	fw_priv->fwc = fwc;
	fw_priv->data = dbuf;
	fw_priv->allocated_size = size;
	fw_priv->offset = offset;
	fw_priv->opt_flags = opt_flags;
	fw_state_init(fw_priv);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	INIT_LIST_HEAD(&fw_priv->pending_list);
#endif

	pr_debug("%s: fw-%s fw_priv=%p\n", __func__, fw_name, fw_priv);

	return fw_priv;
}
214 
__lookup_fw_priv(const char * fw_name)215 static struct fw_priv *__lookup_fw_priv(const char *fw_name)
216 {
217 	struct fw_priv *tmp;
218 	struct firmware_cache *fwc = &fw_cache;
219 
220 	list_for_each_entry(tmp, &fwc->head, list)
221 		if (!strcmp(tmp->fw_name, fw_name))
222 			return tmp;
223 	return NULL;
224 }
225 
/*
 * Look up an in-flight/cached fw_priv by name, or allocate a new one.
 * Returns 1 for batching firmware requests with the same name (the found
 * entry is returned with an extra reference), 0 when a fresh fw_priv was
 * allocated, or -ENOMEM on allocation failure.
 */
static int alloc_lookup_fw_priv(const char *fw_name,
				struct firmware_cache *fwc,
				struct fw_priv **fw_priv,
				void *dbuf,
				size_t size,
				size_t offset,
				u32 opt_flags)
{
	struct fw_priv *tmp;

	spin_lock(&fwc->lock);
	/*
	 * Do not merge requests that are marked to be non-cached or
	 * are performing partial reads.
	 */
	if (!(opt_flags & (FW_OPT_NOCACHE | FW_OPT_PARTIAL))) {
		tmp = __lookup_fw_priv(fw_name);
		if (tmp) {
			kref_get(&tmp->ref);
			spin_unlock(&fwc->lock);
			*fw_priv = tmp;
			pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
			return 1;
		}
	}

	/* Not found (or not mergeable): allocate under the lock. */
	tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size, offset, opt_flags);
	if (tmp) {
		INIT_LIST_HEAD(&tmp->list);
		/* Only cacheable entries go on the lookup list. */
		if (!(opt_flags & FW_OPT_NOCACHE))
			list_add(&tmp->list, &fwc->head);
	}
	spin_unlock(&fwc->lock);

	*fw_priv = tmp;

	return tmp ? 0 : -ENOMEM;
}
265 
/*
 * kref release callback for fw_priv: unlink from the cache list and free
 * all backing storage.  Called with fwc->lock held (taken by
 * free_fw_priv()) and releases it, as annotated by __releases().
 */
static void __free_fw_priv(struct kref *ref)
	__releases(&fwc->lock)
{
	struct fw_priv *fw_priv = to_fw_priv(ref);
	struct firmware_cache *fwc = fw_priv->fwc;

	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
		 (unsigned int)fw_priv->size);

	list_del(&fw_priv->list);
	spin_unlock(&fwc->lock);

	/* Paged buffers own their pages; plain buffers were vmalloc'ed,
	 * unless the caller preallocated them (allocated_size != 0). */
	if (fw_is_paged_buf(fw_priv))
		fw_free_paged_buf(fw_priv);
	else if (!fw_priv->allocated_size)
		vfree(fw_priv->data);

	kfree_const(fw_priv->fw_name);
	kfree(fw_priv);
}
287 
/*
 * Drop a reference to @fw_priv.  The cache lock is taken before kref_put()
 * so the final put can unlink the entry atomically; on the final put
 * __free_fw_priv() releases the lock itself, otherwise we release it here.
 */
static void free_fw_priv(struct fw_priv *fw_priv)
{
	struct firmware_cache *fwc = fw_priv->fwc;
	spin_lock(&fwc->lock);
	if (!kref_put(&fw_priv->ref, __free_fw_priv))
		spin_unlock(&fwc->lock);
}
295 
296 #ifdef CONFIG_FW_LOADER_PAGED_BUF
/* True when the firmware data lives in a vmap'ed array of pages. */
bool fw_is_paged_buf(struct fw_priv *fw_priv)
{
	return fw_priv->is_paged_buf;
}
301 
/*
 * Tear down a paged buffer: drop the vmap mapping, free every page and
 * the page-pointer array.  Safe to call when no pages were allocated.
 */
void fw_free_paged_buf(struct fw_priv *fw_priv)
{
	int i;

	if (!fw_priv->pages)
		return;

	/* Remove the kernel mapping before freeing the underlying pages. */
	vunmap(fw_priv->data);

	for (i = 0; i < fw_priv->nr_pages; i++)
		__free_page(fw_priv->pages[i]);
	kvfree(fw_priv->pages);
	fw_priv->pages = NULL;
	fw_priv->page_array_size = 0;
	fw_priv->nr_pages = 0;
}
318 
/*
 * Ensure the paged buffer holds at least @pages_needed pages, growing the
 * page-pointer array (doubling) and allocating pages as required.
 * Returns 0 or -ENOMEM; on failure already-allocated pages are kept and
 * must be released via fw_free_paged_buf().
 */
int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed)
{
	/* If the array of pages is too small, grow it */
	if (fw_priv->page_array_size < pages_needed) {
		int new_array_size = max(pages_needed,
					 fw_priv->page_array_size * 2);
		struct page **new_pages;

		new_pages = kvmalloc_array(new_array_size, sizeof(void *),
					   GFP_KERNEL);
		if (!new_pages)
			return -ENOMEM;
		/* Copy the old pointers over and zero the new tail. */
		memcpy(new_pages, fw_priv->pages,
		       fw_priv->page_array_size * sizeof(void *));
		memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
		       (new_array_size - fw_priv->page_array_size));
		kvfree(fw_priv->pages);
		fw_priv->pages = new_pages;
		fw_priv->page_array_size = new_array_size;
	}

	while (fw_priv->nr_pages < pages_needed) {
		fw_priv->pages[fw_priv->nr_pages] =
			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

		if (!fw_priv->pages[fw_priv->nr_pages])
			return -ENOMEM;
		fw_priv->nr_pages++;
	}

	return 0;
}
351 
/*
 * Map the collected pages into one contiguous, read-only kernel virtual
 * range and publish it as fw_priv->data.  Returns 0 or -ENOMEM.
 */
int fw_map_paged_buf(struct fw_priv *fw_priv)
{
	/* one pages buffer should be mapped/unmapped only once */
	if (!fw_priv->pages)
		return 0;

	/* Drop any stale mapping before creating the final RO one. */
	vunmap(fw_priv->data);
	fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
			     PAGE_KERNEL_RO);
	if (!fw_priv->data)
		return -ENOMEM;

	return 0;
}
366 #endif
367 
368 /*
369  * XZ-compressed firmware support
370  */
371 #ifdef CONFIG_FW_LOADER_COMPRESS
372 /* show an error and return the standard error code */
fw_decompress_xz_error(struct device * dev,enum xz_ret xz_ret)373 static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret)
374 {
375 	if (xz_ret != XZ_STREAM_END) {
376 		dev_warn(dev, "xz decompression failed (xz_ret=%d)\n", xz_ret);
377 		return xz_ret == XZ_MEM_ERROR ? -ENOMEM : -EINVAL;
378 	}
379 	return 0;
380 }
381 
/* single-shot decompression onto the pre-allocated buffer */
static int fw_decompress_xz_single(struct device *dev, struct fw_priv *fw_priv,
				   size_t in_size, const void *in_buffer)
{
	struct xz_dec *xz_dec;
	struct xz_buf xz_buf;
	enum xz_ret xz_ret;

	/* XZ_SINGLE: whole input and output available up front. */
	xz_dec = xz_dec_init(XZ_SINGLE, (u32)-1);
	if (!xz_dec)
		return -ENOMEM;

	xz_buf.in_size = in_size;
	xz_buf.in = in_buffer;
	xz_buf.in_pos = 0;
	xz_buf.out_size = fw_priv->allocated_size;
	xz_buf.out = fw_priv->data;
	xz_buf.out_pos = 0;

	xz_ret = xz_dec_run(xz_dec, &xz_buf);
	xz_dec_end(xz_dec);

	/* Record the produced length even when translating an error below. */
	fw_priv->size = xz_buf.out_pos;
	return fw_decompress_xz_error(dev, xz_ret);
}
407 
/*
 * decompression on paged buffer and map it: grow the paged buffer one
 * page at a time, decompressing directly into each new page, then vmap
 * the result.  Used when no destination buffer was preallocated.
 */
static int fw_decompress_xz_pages(struct device *dev, struct fw_priv *fw_priv,
				  size_t in_size, const void *in_buffer)
{
	struct xz_dec *xz_dec;
	struct xz_buf xz_buf;
	enum xz_ret xz_ret;
	struct page *page;
	int err = 0;

	/* XZ_DYNALLOC: output produced incrementally, dictionary on demand. */
	xz_dec = xz_dec_init(XZ_DYNALLOC, (u32)-1);
	if (!xz_dec)
		return -ENOMEM;

	xz_buf.in_size = in_size;
	xz_buf.in = in_buffer;
	xz_buf.in_pos = 0;

	fw_priv->is_paged_buf = true;
	fw_priv->size = 0;
	do {
		if (fw_grow_paged_buf(fw_priv, fw_priv->nr_pages + 1)) {
			err = -ENOMEM;
			goto out;
		}

		/* decompress onto the new allocated page */
		page = fw_priv->pages[fw_priv->nr_pages - 1];
		xz_buf.out = kmap(page);
		xz_buf.out_pos = 0;
		xz_buf.out_size = PAGE_SIZE;
		xz_ret = xz_dec_run(xz_dec, &xz_buf);
		kunmap(page);
		fw_priv->size += xz_buf.out_pos;
		/* partial decompression means either end or error */
		if (xz_buf.out_pos != PAGE_SIZE)
			break;
	} while (xz_ret == XZ_OK);

	err = fw_decompress_xz_error(dev, xz_ret);
	if (!err)
		err = fw_map_paged_buf(fw_priv);

 out:
	xz_dec_end(xz_dec);
	return err;
}
455 
fw_decompress_xz(struct device * dev,struct fw_priv * fw_priv,size_t in_size,const void * in_buffer)456 static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
457 			    size_t in_size, const void *in_buffer)
458 {
459 	/* if the buffer is pre-allocated, we can perform in single-shot mode */
460 	if (fw_priv->data)
461 		return fw_decompress_xz_single(dev, fw_priv, in_size, in_buffer);
462 	else
463 		return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer);
464 }
465 #endif /* CONFIG_FW_LOADER_COMPRESS */
466 
467 /* direct firmware loading support */
/* direct firmware loading support */
#define CUSTOM_FW_PATH_COUNT	10
#define PATH_SIZE		255
/* storage for up to 10 user-supplied paths ("firmware_class.path=") */
static char fw_path_para[CUSTOM_FW_PATH_COUNT][PATH_SIZE];
/* search order: custom paths first, then the standard locations */
static const char * const fw_path[] = {
	fw_path_para[0],
	fw_path_para[1],
	fw_path_para[2],
	fw_path_para[3],
	fw_path_para[4],
	fw_path_para[5],
	fw_path_para[6],
	fw_path_para[7],
	fw_path_para[8],
	fw_path_para[9],
	"/lib/firmware/updates/" UTS_RELEASE,
	"/lib/firmware/updates",
	"/lib/firmware/" UTS_RELEASE,
	"/lib/firmware"
};
487 
/* scratch buffer for parsing the comma-separated path list */
static char strpath[PATH_SIZE * CUSTOM_FW_PATH_COUNT];
/*
 * Parse "path1,path2,..." from @val into fw_path_para[]; empty tokens
 * are skipped and tokens beyond CUSTOM_FW_PATH_COUNT are ignored.
 * Always returns 0.
 */
static int firmware_param_path_set(const char *val, const struct kernel_param *kp)
{
	int i;
	char *path, *end;

	strscpy(strpath, val, sizeof(strpath));
	/* Remove leading and trailing spaces from path */
	path = strim(strpath);
	for (i = 0; path && i < CUSTOM_FW_PATH_COUNT; i++) {
		end = strchr(path, ',');

		/* Skip continuous token case, for example ',,,' */
		if (end == path) {
			i--;
			path = ++end;
			continue;
		}

		if (end != NULL)
			*end = '\0';
		else {
			/* end of the string reached and no other tokens ','  */
			strscpy(fw_path_para[i], path, PATH_SIZE);
			break;
		}

		strscpy(fw_path_para[i], path, PATH_SIZE);
		path = ++end;
	}

	return 0;
}
521 
firmware_param_path_get(char * buffer,const struct kernel_param * kp)522 static int firmware_param_path_get(char *buffer, const struct kernel_param *kp)
523 {
524 	int count = 0, i;
525 
526 	for (i = 0; i < CUSTOM_FW_PATH_COUNT; i++) {
527 		if (strlen(fw_path_para[i]) != 0)
528 			count += scnprintf(buffer + count, PATH_SIZE, "%s%s", fw_path_para[i], ",");
529 	}
530 
531 	buffer[count - 1] = '\0';
532 
533 	return count - 1;
534 }
535 /*
 * Typical usage is that passing 'firmware_class.path=/vendor,/firmware_mnt'
537  * from kernel command line because firmware_class is generally built in
538  * kernel instead of module. ',' is used as delimiter for setting 10
539  * custom paths for firmware loader.
540  */
541 
/* "firmware_class.path" module parameter: custom search path get/set */
static const struct kernel_param_ops firmware_param_ops = {
	.set = firmware_param_path_set,
	.get = firmware_param_path_get,
};
module_param_cb(path, &firmware_param_ops, NULL, 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
548 
/*
 * Try to load the firmware directly from the filesystem.
 *
 * Each entry of fw_path[] is tried in order with the file name
 * "<path>/<fw_name><suffix>".  When @decompress is non-NULL the raw file
 * contents are fed through it and the original buffer discarded;
 * otherwise the read buffer (or the caller's preallocated one) becomes
 * the firmware data.
 *
 * Returns 0 on success, -ENOENT when the file exists in no search path,
 * or another negative errno on read/decompression failure.
 */
static int
fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
			   const char *suffix,
			   int (*decompress)(struct device *dev,
					     struct fw_priv *fw_priv,
					     size_t in_size,
					     const void *in_buffer))
{
	size_t size;
	int i, len;
	int rc = -ENOENT;
	char *path;
	size_t msize = INT_MAX;
	void *buffer = NULL;

	/* Already populated data member means we're loading into a buffer */
	if (!decompress && fw_priv->data) {
		buffer = fw_priv->data;
		msize = fw_priv->allocated_size;
	}

	path = __getname();
	if (!path)
		return -ENOMEM;

	/* Firmware may live in the initramfs; don't race its unpacking. */
	wait_for_initramfs();
	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
		size_t file_size = 0;
		size_t *file_size_ptr = NULL;

		/* skip the unset customized path */
		if (!fw_path[i][0])
			continue;

		len = snprintf(path, PATH_MAX, "%s/%s%s",
			       fw_path[i], fw_priv->fw_name, suffix);
		if (len >= PATH_MAX) {
			rc = -ENAMETOOLONG;
			break;
		}

		fw_priv->size = 0;

		/*
		 * The total file size is only examined when doing a partial
		 * read; the "full read" case needs to fail if the whole
		 * firmware was not completely loaded.
		 */
		if ((fw_priv->opt_flags & FW_OPT_PARTIAL) && buffer)
			file_size_ptr = &file_size;

		/* load firmware files from the mount namespace of init */
		rc = kernel_read_file_from_path_initns(path, fw_priv->offset,
						       &buffer, msize,
						       file_size_ptr,
						       READING_FIRMWARE);
		if (rc < 0) {
			if (rc != -ENOENT)
				dev_warn(device, "loading %s failed with error %d\n",
					 path, rc);
			else
				dev_dbg(device, "loading %s failed for no such file or directory.\n",
					 path);
			continue;
		}
		size = rc;
		rc = 0;

		dev_dbg(device, "Loading firmware from %s\n", path);
		if (decompress) {
			dev_dbg(device, "f/w decompressing %s\n",
				fw_priv->fw_name);
			rc = decompress(device, fw_priv, size, buffer);
			/* discard the superfluous original content */
			vfree(buffer);
			buffer = NULL;
			if (rc) {
				fw_free_paged_buf(fw_priv);
				continue;
			}
		} else {
			dev_dbg(device, "direct-loading %s\n",
				fw_priv->fw_name);
			if (!fw_priv->data)
				fw_priv->data = buffer;
			fw_priv->size = size;
		}
		/* Wake any batched waiters: the load is complete. */
		fw_state_done(fw_priv);
		break;
	}
	__putname(path);

	return rc;
}
643 
644 /* firmware holds the ownership of pages */
firmware_free_data(const struct firmware * fw)645 static void firmware_free_data(const struct firmware *fw)
646 {
647 	/* Loaded directly? */
648 	if (!fw->priv) {
649 		vfree(fw->data);
650 		return;
651 	}
652 	free_fw_priv(fw->priv);
653 }
654 
/* store the pages buffer into firmware from buf: publish the loaded
 * data/size of @fw_priv through the caller-visible struct firmware */
static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
{
	fw->priv = fw_priv;
	fw->size = fw_priv->size;
	fw->data = fw_priv->data;

	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
		 (unsigned int)fw_priv->size);
}
666 
667 #ifdef CONFIG_FW_CACHE
/*
 * devres release callback: free the recorded firmware name.  The magic
 * check only gates the debug print; the name is freed unconditionally.
 */
static void fw_name_devm_release(struct device *dev, void *res)
{
	struct fw_name_devm *fwn = res;

	if (fwn->magic == (unsigned long)&fw_cache)
		pr_debug("%s: fw_name-%s devm-%p released\n",
				__func__, fwn->name, res);
	kfree_const(fwn->name);
}
677 
fw_devm_match(struct device * dev,void * res,void * match_data)678 static int fw_devm_match(struct device *dev, void *res,
679 		void *match_data)
680 {
681 	struct fw_name_devm *fwn = res;
682 
683 	return (fwn->magic == (unsigned long)&fw_cache) &&
684 		!strcmp(fwn->name, match_data);
685 }
686 
fw_find_devm_name(struct device * dev,const char * name)687 static struct fw_name_devm *fw_find_devm_name(struct device *dev,
688 		const char *name)
689 {
690 	struct fw_name_devm *fwn;
691 
692 	fwn = devres_find(dev, fw_name_devm_release,
693 			  fw_devm_match, (void *)name);
694 	return fwn;
695 }
696 
/* True when @name is already registered for caching on @dev. */
static bool fw_cache_is_setup(struct device *dev, const char *name)
{
	return fw_find_devm_name(dev, name) != NULL;
}
707 
/* add firmware name into devres list so the name is cached/uncached
 * together with the device's lifetime; idempotent per (dev, name).
 * Returns 0 on success (including "already registered") or -ENOMEM. */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	struct fw_name_devm *fwn;

	if (fw_cache_is_setup(dev, name))
		return 0;

	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
			   GFP_KERNEL);
	if (!fwn)
		return -ENOMEM;
	fwn->name = kstrdup_const(name, GFP_KERNEL);
	if (!fwn->name) {
		devres_free(fwn);
		return -ENOMEM;
	}

	/* Tag the entry so fw_devm_match() can identify it later. */
	fwn->magic = (unsigned long)&fw_cache;
	devres_add(dev, fwn);

	return 0;
}
731 #else
/* CONFIG_FW_CACHE disabled: no firmware name is ever registered. */
static bool fw_cache_is_setup(struct device *dev, const char *name)
{
	return false;
}
736 
/* CONFIG_FW_CACHE disabled: registering a name is a successful no-op. */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	return 0;
}
741 #endif
742 
/*
 * Bind a completed fw_priv to the caller-visible struct firmware and,
 * where allowed, register its name for cache/uncache across suspend.
 * Returns -ENOENT when the load was aborted or produced no data.
 */
int assign_fw(struct firmware *fw, struct device *device)
{
	struct fw_priv *fw_priv = fw->priv;
	int ret;

	mutex_lock(&fw_lock);
	if (!fw_priv->size || fw_state_is_aborted(fw_priv)) {
		mutex_unlock(&fw_lock);
		return -ENOENT;
	}

	/*
	 * add firmware name into devres list so that we can auto cache
	 * and uncache firmware for device.
	 *
	 * device may have been deleted already, but the problem
	 * should be fixed in devres or driver core.
	 */
	/* don't cache firmware handled without uevent */
	if (device && (fw_priv->opt_flags & FW_OPT_UEVENT) &&
	    !(fw_priv->opt_flags & FW_OPT_NOCACHE)) {
		ret = fw_add_devm_name(device, fw_priv->fw_name);
		if (ret) {
			mutex_unlock(&fw_lock);
			return ret;
		}
	}

	/*
	 * After caching firmware image is started, let it piggyback
	 * on request firmware.
	 */
	if (!(fw_priv->opt_flags & FW_OPT_NOCACHE) &&
	    fw_priv->fwc->state == FW_LOADER_START_CACHE)
		fw_cache_piggyback_on_request(fw_priv);

	/* pass the pages buffer to driver at the last minute */
	fw_set_page_data(fw_priv, fw);
	mutex_unlock(&fw_lock);
	return 0;
}
784 
/* prepare firmware and firmware_buf structs;
 * return 0 if a firmware is already assigned, 1 if need to load one,
 * or a negative error code
 */
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
			  struct device *device, void *dbuf, size_t size,
			  size_t offset, u32 opt_flags)
{
	struct firmware *firmware;
	struct fw_priv *fw_priv;
	int ret;

	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
	if (!firmware) {
		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
			__func__);
		return -ENOMEM;
	}

	/* Builtin images short-circuit the whole loading machinery. */
	if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
		dev_dbg(device, "using built-in %s\n", name);
		return 0; /* assigned */
	}

	ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
				   offset, opt_flags);

	/*
	 * bind with 'priv' now to avoid warning in failure path
	 * of requesting firmware.
	 */
	firmware->priv = fw_priv;

	/* ret > 0: batched with an in-flight request — wait for it. */
	if (ret > 0) {
		ret = fw_state_wait(fw_priv);
		if (!ret) {
			fw_set_page_data(fw_priv, firmware);
			return 0; /* assigned */
		}
	}

	if (ret < 0)
		return ret;
	return 1; /* need to load */
}
831 
/*
 * Batched requests need only one wake, we need to do this step last due to the
 * fallback mechanism. The buf is protected with kref_get(), and it won't be
 * released until the last user calls release_firmware().
 *
 * Failed batched requests are possible as well, in such cases we just share
 * the struct fw_priv and won't release it until all requests are woken
 * and have gone through this same path.
 */
static void fw_abort_batch_reqs(struct firmware *fw)
{
	struct fw_priv *fw_priv;

	/* Loaded directly? */
	if (!fw || !fw->priv)
		return;

	fw_priv = fw->priv;
	mutex_lock(&fw_lock);
	/* Abort only once; other batched requests may already have done it. */
	if (!fw_state_is_aborted(fw_priv))
		fw_state_aborted(fw_priv);
	mutex_unlock(&fw_lock);
}
855 
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
		  struct device *device, void *buf, size_t size,
		  size_t offset, u32 opt_flags)
{
	struct firmware *fw = NULL;
	struct cred *kern_cred = NULL;
	const struct cred *old_cred;
	bool nondirect = false;
	int ret;

	if (!firmware_p)
		return -EINVAL;

	if (!name || name[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	ret = _request_firmware_prepare(&fw, name, device, buf, size,
					offset, opt_flags);
	if (ret <= 0) /* error or already assigned */
		goto out;

	/*
	 * We are about to try to access the firmware file. Because we may have been
	 * called by a driver when serving an unrelated request from userland, we use
	 * the kernel credentials to read the file.
	 */
	kern_cred = prepare_kernel_cred(NULL);
	if (!kern_cred) {
		ret = -ENOMEM;
		goto out;
	}
	old_cred = override_creds(kern_cred);

	/* First attempt: direct, uncompressed filesystem load. */
	ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);

	/* Only full reads can support decompression, platform, and sysfs. */
	if (!(opt_flags & FW_OPT_PARTIAL))
		nondirect = true;

#ifdef CONFIG_FW_LOADER_COMPRESS
	if (ret == -ENOENT && nondirect)
		ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
						 fw_decompress_xz);
#endif
	if (ret == -ENOENT && nondirect)
		ret = firmware_fallback_platform(fw->priv);

	if (ret) {
		if (!(opt_flags & FW_OPT_NO_WARN))
			dev_warn(device,
				 "Direct firmware load for %s failed with error %d\n",
				 name, ret);
		if (nondirect)
			ret = firmware_fallback_sysfs(fw, name, device,
						      opt_flags, ret);
	} else
		ret = assign_fw(fw, device);

	revert_creds(old_cred);
	put_cred(kern_cred);

 out:
	if (ret < 0) {
		/* Wake any batched waiters, then drop our reference. */
		fw_abort_batch_reqs(fw);
		release_firmware(fw);
		fw = NULL;
	}

	*firmware_p = fw;
	return ret;
}
931 
/**
 * request_firmware() - send firmware request and wait for it
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 *      @firmware_p will be used to return a firmware image by the name
 *      of @name for device @device.
 *
 *      Should be called from user context where sleeping is allowed.
 *
 *      @name will be used as $FIRMWARE in the uevent environment and
 *      should be distinctive enough not to be confused with any other
 *      firmware image for this or any other device.
 *
 *	Caller must hold the reference count of @device.
 *
 *	The function can be called safely inside device's suspend and
 *	resume callback.
 **/
int
request_firmware(const struct firmware **firmware_p, const char *name,
		 struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, NULL, 0, 0,
				FW_OPT_UEVENT);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware);
966 
/**
 * firmware_request_nowarn() - request for an optional fw module
 * @firmware: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * This function is similar in behaviour to request_firmware(), except it
 * doesn't produce warning messages when the file is not found. The sysfs
 * fallback mechanism is enabled if direct filesystem lookup fails. However,
 * failures to find the firmware file with it are still suppressed. It is
 * therefore up to the driver to check for the return value of this call and to
 * decide when to inform the users of errors.
 **/
int firmware_request_nowarn(const struct firmware **firmware, const char *name,
			    struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware, name, device, NULL, 0, 0,
				FW_OPT_UEVENT | FW_OPT_NO_WARN);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL_GPL(firmware_request_nowarn);
993 
/**
 * request_firmware_direct() - load firmware directly without usermode helper
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * This function works pretty much like request_firmware(), but this doesn't
 * fall back to usermode helper even if the firmware couldn't be loaded
 * directly from fs.  Hence it's useful for loading optional firmwares, which
 * aren't always present, without extra long timeouts of udev.
 **/
int request_firmware_direct(const struct firmware **firmware_p,
			    const char *name, struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, NULL, 0, 0,
				FW_OPT_UEVENT | FW_OPT_NO_WARN |
				FW_OPT_NOFALLBACK_SYSFS);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL_GPL(request_firmware_direct);
1018 
1019 /**
1020  * firmware_request_platform() - request firmware with platform-fw fallback
1021  * @firmware: pointer to firmware image
1022  * @name: name of firmware file
1023  * @device: device for which firmware is being loaded
1024  *
1025  * This function is similar in behaviour to request_firmware, except that if
1026  * direct filesystem lookup fails, it will fallback to looking for a copy of the
1027  * requested firmware embedded in the platform's main (e.g. UEFI) firmware.
1028  **/
firmware_request_platform(const struct firmware ** firmware,const char * name,struct device * device)1029 int firmware_request_platform(const struct firmware **firmware,
1030 			      const char *name, struct device *device)
1031 {
1032 	int ret;
1033 
1034 	/* Need to pin this module until return */
1035 	__module_get(THIS_MODULE);
1036 	ret = _request_firmware(firmware, name, device, NULL, 0, 0,
1037 				FW_OPT_UEVENT | FW_OPT_FALLBACK_PLATFORM);
1038 	module_put(THIS_MODULE);
1039 	return ret;
1040 }
1041 EXPORT_SYMBOL_GPL(firmware_request_platform);
1042 
1043 /**
1044  * firmware_request_cache() - cache firmware for suspend so resume can use it
1045  * @name: name of firmware file
1046  * @device: device for which firmware should be cached for
1047  *
1048  * There are some devices with an optimization that enables the device to not
1049  * require loading firmware on system reboot. This optimization may still
1050  * require the firmware present on resume from suspend. This routine can be
1051  * used to ensure the firmware is present on resume from suspend in these
1052  * situations. This helper is not compatible with drivers which use
1053  * request_firmware_into_buf() or request_firmware_nowait() with no uevent set.
1054  **/
firmware_request_cache(struct device * device,const char * name)1055 int firmware_request_cache(struct device *device, const char *name)
1056 {
1057 	int ret;
1058 
1059 	mutex_lock(&fw_lock);
1060 	ret = fw_add_devm_name(device, name);
1061 	mutex_unlock(&fw_lock);
1062 
1063 	return ret;
1064 }
1065 EXPORT_SYMBOL_GPL(firmware_request_cache);
1066 
1067 /**
1068  * request_firmware_into_buf() - load firmware into a previously allocated buffer
1069  * @firmware_p: pointer to firmware image
1070  * @name: name of firmware file
1071  * @device: device for which firmware is being loaded and DMA region allocated
1072  * @buf: address of buffer to load firmware into
1073  * @size: size of buffer
1074  *
1075  * This function works pretty much like request_firmware(), but it doesn't
1076  * allocate a buffer to hold the firmware data. Instead, the firmware
1077  * is loaded directly into the buffer pointed to by @buf and the @firmware_p
1078  * data member is pointed at @buf.
1079  *
1080  * This function doesn't cache firmware either.
1081  */
1082 int
request_firmware_into_buf(const struct firmware ** firmware_p,const char * name,struct device * device,void * buf,size_t size)1083 request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
1084 			  struct device *device, void *buf, size_t size)
1085 {
1086 	int ret;
1087 
1088 	if (fw_cache_is_setup(device, name))
1089 		return -EOPNOTSUPP;
1090 
1091 	__module_get(THIS_MODULE);
1092 	ret = _request_firmware(firmware_p, name, device, buf, size, 0,
1093 				FW_OPT_UEVENT | FW_OPT_NOCACHE);
1094 	module_put(THIS_MODULE);
1095 	return ret;
1096 }
1097 EXPORT_SYMBOL(request_firmware_into_buf);
1098 
1099 /**
1100  * request_partial_firmware_into_buf() - load partial firmware into a previously allocated buffer
1101  * @firmware_p: pointer to firmware image
1102  * @name: name of firmware file
1103  * @device: device for which firmware is being loaded and DMA region allocated
1104  * @buf: address of buffer to load firmware into
1105  * @size: size of buffer
1106  * @offset: offset into file to read
1107  *
1108  * This function works pretty much like request_firmware_into_buf except
1109  * it allows a partial read of the file.
1110  */
1111 int
request_partial_firmware_into_buf(const struct firmware ** firmware_p,const char * name,struct device * device,void * buf,size_t size,size_t offset)1112 request_partial_firmware_into_buf(const struct firmware **firmware_p,
1113 				  const char *name, struct device *device,
1114 				  void *buf, size_t size, size_t offset)
1115 {
1116 	int ret;
1117 
1118 	if (fw_cache_is_setup(device, name))
1119 		return -EOPNOTSUPP;
1120 
1121 	__module_get(THIS_MODULE);
1122 	ret = _request_firmware(firmware_p, name, device, buf, size, offset,
1123 				FW_OPT_UEVENT | FW_OPT_NOCACHE |
1124 				FW_OPT_PARTIAL);
1125 	module_put(THIS_MODULE);
1126 	return ret;
1127 }
1128 EXPORT_SYMBOL(request_partial_firmware_into_buf);
1129 
/**
 * release_firmware() - release the resource associated with a firmware image
 * @fw: firmware resource to release
 *
 * Safe to call with a NULL @fw. Built-in firmware data is never freed;
 * only the struct firmware wrapper itself is released in that case.
 **/
void release_firmware(const struct firmware *fw)
{
	if (!fw)
		return;

	if (!fw_is_builtin_firmware(fw))
		firmware_free_data(fw);
	kfree(fw);
}
EXPORT_SYMBOL(release_firmware);
1143 
1144 /* Async support */
/* State for one asynchronous firmware request (request_firmware_nowait()). */
struct firmware_work {
	struct work_struct work;	/* queued via schedule_work() */
	struct module *module;		/* requester; ref held until work runs */
	const char *name;		/* firmware file name (kstrdup_const copy) */
	struct device *device;		/* consumer; ref held until work runs */
	void *context;			/* opaque cookie handed back to @cont */
	void (*cont)(const struct firmware *fw, void *context); /* completion callback */
	u32 opt_flags;			/* FW_OPT_* flags for _request_firmware() */
};
1154 
request_firmware_work_func(struct work_struct * work)1155 static void request_firmware_work_func(struct work_struct *work)
1156 {
1157 	struct firmware_work *fw_work;
1158 	const struct firmware *fw;
1159 
1160 	fw_work = container_of(work, struct firmware_work, work);
1161 
1162 	_request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0, 0,
1163 			  fw_work->opt_flags);
1164 	fw_work->cont(fw, fw_work->context);
1165 	put_device(fw_work->device); /* taken in request_firmware_nowait() */
1166 
1167 	module_put(fw_work->module);
1168 	kfree_const(fw_work->name);
1169 	kfree(fw_work);
1170 }
1171 
1172 /**
1173  * request_firmware_nowait() - asynchronous version of request_firmware
1174  * @module: module requesting the firmware
1175  * @uevent: sends uevent to copy the firmware image if this flag
1176  *	is non-zero else the firmware copy must be done manually.
1177  * @name: name of firmware file
1178  * @device: device for which firmware is being loaded
1179  * @gfp: allocation flags
1180  * @context: will be passed over to @cont, and
1181  *	@fw may be %NULL if firmware request fails.
1182  * @cont: function will be called asynchronously when the firmware
1183  *	request is over.
1184  *
1185  *	Caller must hold the reference count of @device.
1186  *
1187  *	Asynchronous variant of request_firmware() for user contexts:
1188  *		- sleep for as small periods as possible since it may
1189  *		  increase kernel boot time of built-in device drivers
1190  *		  requesting firmware in their ->probe() methods, if
1191  *		  @gfp is GFP_KERNEL.
1192  *
1193  *		- can't sleep at all if @gfp is GFP_ATOMIC.
1194  **/
1195 int
request_firmware_nowait(struct module * module,bool uevent,const char * name,struct device * device,gfp_t gfp,void * context,void (* cont)(const struct firmware * fw,void * context))1196 request_firmware_nowait(
1197 	struct module *module, bool uevent,
1198 	const char *name, struct device *device, gfp_t gfp, void *context,
1199 	void (*cont)(const struct firmware *fw, void *context))
1200 {
1201 	struct firmware_work *fw_work;
1202 
1203 	fw_work = kzalloc(sizeof(struct firmware_work), gfp);
1204 	if (!fw_work)
1205 		return -ENOMEM;
1206 
1207 	fw_work->module = module;
1208 	fw_work->name = kstrdup_const(name, gfp);
1209 	if (!fw_work->name) {
1210 		kfree(fw_work);
1211 		return -ENOMEM;
1212 	}
1213 	fw_work->device = device;
1214 	fw_work->context = context;
1215 	fw_work->cont = cont;
1216 	fw_work->opt_flags = FW_OPT_NOWAIT |
1217 		(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
1218 
1219 	if (!uevent && fw_cache_is_setup(device, name)) {
1220 		kfree_const(fw_work->name);
1221 		kfree(fw_work);
1222 		return -EOPNOTSUPP;
1223 	}
1224 
1225 	if (!try_module_get(module)) {
1226 		kfree_const(fw_work->name);
1227 		kfree(fw_work);
1228 		return -EFAULT;
1229 	}
1230 
1231 	get_device(fw_work->device);
1232 	INIT_WORK(&fw_work->work, request_firmware_work_func);
1233 	schedule_work(&fw_work->work);
1234 	return 0;
1235 }
1236 EXPORT_SYMBOL(request_firmware_nowait);
1237 
1238 #ifdef CONFIG_FW_CACHE
/* Async domain for cache work; drained in device_cache_fw_images(). */
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
1240 
/**
 * cache_firmware() - cache one firmware image in kernel memory space
 * @fw_name: the firmware image name
 *
 * Cache firmware in kernel memory so that drivers can use it when
 * system isn't ready for them to request firmware image from userspace.
 * Once it returns successfully, driver can use request_firmware or its
 * nowait version to get the cached firmware without any interacting
 * with userspace
 *
 * Return 0 if the firmware image has been cached successfully
 * Return !0 otherwise
 *
 */
static int cache_firmware(const char *fw_name)
{
	int ret;
	const struct firmware *fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	ret = request_firmware(&fw, fw_name, NULL);
	if (!ret)
		/*
		 * Deliberately kfree() instead of release_firmware(): only
		 * the wrapper is freed, while the data reference taken by
		 * the request is kept so the image stays in the cache until
		 * uncache_firmware() drops it.
		 */
		kfree(fw);

	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);

	return ret;
}
1270 
lookup_fw_priv(const char * fw_name)1271 static struct fw_priv *lookup_fw_priv(const char *fw_name)
1272 {
1273 	struct fw_priv *tmp;
1274 	struct firmware_cache *fwc = &fw_cache;
1275 
1276 	spin_lock(&fwc->lock);
1277 	tmp = __lookup_fw_priv(fw_name);
1278 	spin_unlock(&fwc->lock);
1279 
1280 	return tmp;
1281 }
1282 
1283 /**
1284  * uncache_firmware() - remove one cached firmware image
1285  * @fw_name: the firmware image name
1286  *
1287  * Uncache one firmware image which has been cached successfully
1288  * before.
1289  *
1290  * Return 0 if the firmware cache has been removed successfully
1291  * Return !0 otherwise
1292  *
1293  */
uncache_firmware(const char * fw_name)1294 static int uncache_firmware(const char *fw_name)
1295 {
1296 	struct fw_priv *fw_priv;
1297 	struct firmware fw;
1298 
1299 	pr_debug("%s: %s\n", __func__, fw_name);
1300 
1301 	if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
1302 		return 0;
1303 
1304 	fw_priv = lookup_fw_priv(fw_name);
1305 	if (fw_priv) {
1306 		free_fw_priv(fw_priv);
1307 		return 0;
1308 	}
1309 
1310 	return -EINVAL;
1311 }
1312 
alloc_fw_cache_entry(const char * name)1313 static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
1314 {
1315 	struct fw_cache_entry *fce;
1316 
1317 	fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
1318 	if (!fce)
1319 		goto exit;
1320 
1321 	fce->name = kstrdup_const(name, GFP_ATOMIC);
1322 	if (!fce->name) {
1323 		kfree(fce);
1324 		fce = NULL;
1325 		goto exit;
1326 	}
1327 exit:
1328 	return fce;
1329 }
1330 
__fw_entry_found(const char * name)1331 static int __fw_entry_found(const char *name)
1332 {
1333 	struct firmware_cache *fwc = &fw_cache;
1334 	struct fw_cache_entry *fce;
1335 
1336 	list_for_each_entry(fce, &fwc->fw_names, list) {
1337 		if (!strcmp(fce->name, name))
1338 			return 1;
1339 	}
1340 	return 0;
1341 }
1342 
fw_cache_piggyback_on_request(struct fw_priv * fw_priv)1343 static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
1344 {
1345 	const char *name = fw_priv->fw_name;
1346 	struct firmware_cache *fwc = fw_priv->fwc;
1347 	struct fw_cache_entry *fce;
1348 
1349 	spin_lock(&fwc->name_lock);
1350 	if (__fw_entry_found(name))
1351 		goto found;
1352 
1353 	fce = alloc_fw_cache_entry(name);
1354 	if (fce) {
1355 		list_add(&fce->list, &fwc->fw_names);
1356 		kref_get(&fw_priv->ref);
1357 		pr_debug("%s: fw: %s\n", __func__, name);
1358 	}
1359 found:
1360 	spin_unlock(&fwc->name_lock);
1361 }
1362 
/* Free a name-list entry and its const-duplicated name string. */
static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree_const(fce->name);
	kfree(fce);
}
1368 
/*
 * Async worker: try to cache one firmware image. On failure, remove the
 * entry from the cached-names list and free it so the name is not
 * reported as cached.
 */
static void __async_dev_cache_fw_image(void *fw_entry,
				       async_cookie_t cookie)
{
	struct fw_cache_entry *fce = fw_entry;
	struct firmware_cache *fwc = &fw_cache;

	if (!cache_firmware(fce->name))
		return;

	spin_lock(&fwc->name_lock);
	list_del(&fce->list);
	spin_unlock(&fwc->name_lock);

	free_fw_cache_entry(fce);
}
1385 
1386 /* called with dev->devres_lock held */
dev_create_fw_entry(struct device * dev,void * res,void * data)1387 static void dev_create_fw_entry(struct device *dev, void *res,
1388 				void *data)
1389 {
1390 	struct fw_name_devm *fwn = res;
1391 	const char *fw_name = fwn->name;
1392 	struct list_head *head = data;
1393 	struct fw_cache_entry *fce;
1394 
1395 	fce = alloc_fw_cache_entry(fw_name);
1396 	if (fce)
1397 		list_add(&fce->list, head);
1398 }
1399 
devm_name_match(struct device * dev,void * res,void * match_data)1400 static int devm_name_match(struct device *dev, void *res,
1401 			   void *match_data)
1402 {
1403 	struct fw_name_devm *fwn = res;
1404 	return (fwn->magic == (unsigned long)match_data);
1405 }
1406 
/*
 * Gather the firmware names recorded in @dev's devres list and schedule
 * an async caching attempt for each name not already being handled.
 */
static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	/* Build a private todo list of this device's firmware names */
	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		spin_lock(&fwc->name_lock);
		/* only one cache entry for one firmware */
		if (!__fw_entry_found(fce->name)) {
			list_add(&fce->list, &fwc->fw_names);
		} else {
			free_fw_cache_entry(fce);
			fce = NULL;	/* duplicate: nothing to schedule */
		}
		spin_unlock(&fwc->name_lock);

		if (fce)
			async_schedule_domain(__async_dev_cache_fw_image,
					      (void *)fce,
					      &fw_cache_domain);
	}
}
1437 
/*
 * Drain the cached-names list, uncaching every image. fwc->name_lock is
 * dropped while each entry is processed and retaken before picking the
 * next one, so entries added concurrently are still seen.
 */
static void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				struct fw_cache_entry, list);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}
1457 
1458 /**
1459  * device_cache_fw_images() - cache devices' firmware
1460  *
1461  * If one device called request_firmware or its nowait version
1462  * successfully before, the firmware names are recored into the
1463  * device's devres link list, so device_cache_fw_images can call
1464  * cache_firmware() to cache these firmwares for the device,
1465  * then the device driver can load its firmwares easily at
1466  * time when system is not ready to complete loading firmware.
1467  */
device_cache_fw_images(void)1468 static void device_cache_fw_images(void)
1469 {
1470 	struct firmware_cache *fwc = &fw_cache;
1471 	DEFINE_WAIT(wait);
1472 
1473 	pr_debug("%s\n", __func__);
1474 
1475 	/* cancel uncache work */
1476 	cancel_delayed_work_sync(&fwc->work);
1477 
1478 	fw_fallback_set_cache_timeout();
1479 
1480 	mutex_lock(&fw_lock);
1481 	fwc->state = FW_LOADER_START_CACHE;
1482 	dpm_for_each_dev(NULL, dev_cache_fw_image);
1483 	mutex_unlock(&fw_lock);
1484 
1485 	/* wait for completion of caching firmware for all devices */
1486 	async_synchronize_full_domain(&fw_cache_domain);
1487 
1488 	fw_fallback_set_default_timeout();
1489 }
1490 
/**
 * device_uncache_fw_images() - uncache devices' firmware
 *
 * uncache all firmwares which have been cached successfully
 * by device_cache_fw_images earlier
 */
static void device_uncache_fw_images(void)
{
	pr_debug("%s\n", __func__);
	__device_uncache_fw_images();
}
1502 
/* Delayed-work wrapper around device_uncache_fw_images(). */
static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}
1507 
/**
 * device_uncache_fw_images_delay() - uncache devices firmwares
 * @delay: number of milliseconds to delay uncache device firmwares
 *
 * uncache all devices' firmware which has been cached successfully
 * by device_cache_fw_images after @delay milliseconds.
 */
static void device_uncache_fw_images_delay(unsigned long delay)
{
	queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
			   msecs_to_jiffies(delay));
}
1520 
/*
 * PM notifier: populate the firmware cache before suspend/hibernate and
 * schedule a delayed uncache once the transition is over.
 */
static int fw_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		/*
		 * Here, kill pending fallback requests will only kill
		 * non-uevent firmware request to avoid stalling suspend.
		 */
		kill_pending_fw_fallback_reqs(false);
		device_cache_fw_images();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/*
		 * In case that system sleep failed and syscore_suspend is
		 * not called.
		 */
		mutex_lock(&fw_lock);
		fw_cache.state = FW_LOADER_NO_CACHE;
		mutex_unlock(&fw_lock);

		/* keep cached images around for 10s after resume */
		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
		break;
	}

	return 0;
}
1553 
/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;
}
1560 
/* Only ->suspend is needed; post-resume state reset happens in fw_pm_notify(). */
static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};
1564 
register_fw_pm_ops(void)1565 static int __init register_fw_pm_ops(void)
1566 {
1567 	int ret;
1568 
1569 	spin_lock_init(&fw_cache.name_lock);
1570 	INIT_LIST_HEAD(&fw_cache.fw_names);
1571 
1572 	INIT_DELAYED_WORK(&fw_cache.work,
1573 			  device_uncache_fw_images_work);
1574 
1575 	fw_cache.pm_notify.notifier_call = fw_pm_notify;
1576 	ret = register_pm_notifier(&fw_cache.pm_notify);
1577 	if (ret)
1578 		return ret;
1579 
1580 	register_syscore_ops(&fw_syscore_ops);
1581 
1582 	return ret;
1583 }
1584 
/* Undo register_fw_pm_ops() in reverse order. */
static inline void unregister_fw_pm_ops(void)
{
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
}
1590 #else
/* !CONFIG_FW_CACHE: firmware caching is compiled out; no-op stubs. */
static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
{
}
static inline int register_fw_pm_ops(void)
{
	return 0;
}
static inline void unregister_fw_pm_ops(void)
{
}
1601 #endif
1602 
/* Initialize the global firmware cache bookkeeping; caching starts inactive. */
static void __init fw_cache_init(void)
{
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);
	fw_cache.state = FW_LOADER_NO_CACHE;
}
1609 
/* Reboot notifier: abort outstanding fallback requests before shutdown. */
static int fw_shutdown_notify(struct notifier_block *unused1,
			      unsigned long unused2, void *unused3)
{
	/*
	 * Kill all pending fallback requests to avoid both stalling shutdown,
	 * and avoid a deadlock with the usermode_lock.
	 */
	kill_pending_fw_fallback_reqs(true);

	return NOTIFY_DONE;
}
1621 
/* Registered with the reboot notifier chain in firmware_class_init(). */
static struct notifier_block fw_shutdown_nb = {
	.notifier_call = fw_shutdown_notify,
};
1625 
firmware_class_init(void)1626 static int __init firmware_class_init(void)
1627 {
1628 	int ret;
1629 
1630 	/* No need to unfold these on exit */
1631 	fw_cache_init();
1632 
1633 	ret = register_fw_pm_ops();
1634 	if (ret)
1635 		return ret;
1636 
1637 	ret = register_reboot_notifier(&fw_shutdown_nb);
1638 	if (ret)
1639 		goto out;
1640 
1641 	return register_sysfs_loader();
1642 
1643 out:
1644 	unregister_fw_pm_ops();
1645 	return ret;
1646 }
1647 
/* Module exit: undo the registrations made in firmware_class_init(). */
static void __exit firmware_class_exit(void)
{
	unregister_fw_pm_ops();
	unregister_reboot_notifier(&fw_shutdown_nb);
	unregister_sysfs_loader();
}
1654 
1655 fs_initcall(firmware_class_init);
1656 module_exit(firmware_class_exit);
1657