blob: 249349f64bfe91e13f99ec875b59578785f8d9a8 [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001// SPDX-License-Identifier: GPL-2.0
2/*
3 * main.c - Multi purpose firmware loading support
4 *
5 * Copyright (c) 2003 Manuel Estrada Sainz
6 *
7 * Please see Documentation/firmware_class/ for more information.
8 *
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/capability.h>
14#include <linux/device.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/timer.h>
18#include <linux/vmalloc.h>
19#include <linux/interrupt.h>
20#include <linux/bitops.h>
21#include <linux/mutex.h>
22#include <linux/workqueue.h>
23#include <linux/highmem.h>
24#include <linux/firmware.h>
25#include <linux/slab.h>
26#include <linux/sched.h>
27#include <linux/file.h>
28#include <linux/list.h>
29#include <linux/fs.h>
30#include <linux/async.h>
31#include <linux/pm.h>
32#include <linux/suspend.h>
33#include <linux/syscore_ops.h>
34#include <linux/reboot.h>
35#include <linux/security.h>
David Brazdil0f672f62019-12-10 10:32:29 +000036#include <linux/xz.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000037
38#include <generated/utsrelease.h>
39
40#include "../base.h"
41#include "firmware.h"
42#include "fallback.h"
43
/* Module identity; GPL license is required for the EXPORT_SYMBOL_GPL users. */
MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");
47
/*
 * Global registry of in-flight and cached firmware images, plus (under
 * CONFIG_PM_SLEEP) the bookkeeping needed to cache images across suspend.
 */
struct firmware_cache {
	/* firmware_buf instance will be added into the below list */
	spinlock_t lock;
	struct list_head head;
	int state;	/* FW_LOADER_NO_CACHE or FW_LOADER_START_CACHE */

#ifdef CONFIG_PM_SLEEP
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;
	struct list_head fw_names;

	/* delayed uncaching work scheduled after resume */
	struct delayed_work work;

	/* PM notifier used to trigger caching/uncaching around suspend */
	struct notifier_block pm_notify;
#endif
};
69
/* One cached-firmware name, linked on firmware_cache.fw_names. */
struct fw_cache_entry {
	struct list_head list;
	const char *name;	/* kstrdup_const'd image name */
};
74
/* devres payload tying a firmware name to a device for auto-(un)caching. */
struct fw_name_devm {
	unsigned long magic;	/* set to &fw_cache to identify our devres */
	const char *name;	/* kstrdup_const'd image name */
};
79
/* Map an embedded kref back to its containing struct fw_priv. */
static inline struct fw_priv *to_fw_priv(struct kref *ref)
{
	return container_of(ref, struct fw_priv, ref);
}
84
/* Values for firmware_cache.state: caching disabled / started. */
#define FW_LOADER_NO_CACHE	0
#define FW_LOADER_START_CACHE	1

/* fw_lock could be moved to 'struct fw_sysfs' but since it is just
 * guarding for corner cases a global lock should be OK */
DEFINE_MUTEX(fw_lock);

/* The single global firmware cache instance. */
static struct firmware_cache fw_cache;
93
/* Builtin firmware support */

#ifdef CONFIG_FW_LOADER

/* Linker-provided bounds of the table of firmware images built into vmlinux. */
extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];
100
101static void fw_copy_to_prealloc_buf(struct firmware *fw,
102 void *buf, size_t size)
103{
104 if (!buf || size < fw->size)
105 return;
106 memcpy(buf, fw->data, fw->size);
107}
108
109static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
110 void *buf, size_t size)
111{
112 struct builtin_fw *b_fw;
113
114 for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
115 if (strcmp(name, b_fw->name) == 0) {
116 fw->size = b_fw->size;
117 fw->data = b_fw->data;
118 fw_copy_to_prealloc_buf(fw, buf, size);
119
120 return true;
121 }
122 }
123
124 return false;
125}
126
127static bool fw_is_builtin_firmware(const struct firmware *fw)
128{
129 struct builtin_fw *b_fw;
130
131 for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
132 if (fw->data == b_fw->data)
133 return true;
134
135 return false;
136}
137
#else /* Module case - no builtin firmware support */

/* Stub: no builtin firmware table exists when built as a module. */
static inline bool fw_get_builtin_firmware(struct firmware *fw,
					   const char *name, void *buf,
					   size_t size)
{
	return false;
}

/* Stub: nothing can be builtin in the module case. */
static inline bool fw_is_builtin_firmware(const struct firmware *fw)
{
	return false;
}
#endif
152
/* Initialise the load-state tracking embedded in @fw_priv. */
static void fw_state_init(struct fw_priv *fw_priv)
{
	struct fw_state *fw_st = &fw_priv->fw_st;

	init_completion(&fw_st->completion);
	fw_st->status = FW_STATUS_UNKNOWN;
}
160
/* Block (without timeout) until the firmware load completes or aborts. */
static inline int fw_state_wait(struct fw_priv *fw_priv)
{
	return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
}
165
166static int fw_cache_piggyback_on_request(const char *name);
167
/*
 * Allocate and initialise a fw_priv for @fw_name.  @dbuf/@size describe an
 * optional caller-preallocated destination buffer.  Returns NULL on OOM.
 *
 * Uses GFP_ATOMIC because alloc_lookup_fw_priv() calls this while holding
 * fwc->lock (a spinlock).
 */
static struct fw_priv *__allocate_fw_priv(const char *fw_name,
					  struct firmware_cache *fwc,
					  void *dbuf, size_t size)
{
	struct fw_priv *fw_priv;

	fw_priv = kzalloc(sizeof(*fw_priv), GFP_ATOMIC);
	if (!fw_priv)
		return NULL;

	fw_priv->fw_name = kstrdup_const(fw_name, GFP_ATOMIC);
	if (!fw_priv->fw_name) {
		kfree(fw_priv);
		return NULL;
	}

	/* Caller gets the initial reference. */
	kref_init(&fw_priv->ref);
	fw_priv->fwc = fwc;
	fw_priv->data = dbuf;
	fw_priv->allocated_size = size;
	fw_state_init(fw_priv);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	INIT_LIST_HEAD(&fw_priv->pending_list);
#endif

	pr_debug("%s: fw-%s fw_priv=%p\n", __func__, fw_name, fw_priv);

	return fw_priv;
}
197
198static struct fw_priv *__lookup_fw_priv(const char *fw_name)
199{
200 struct fw_priv *tmp;
201 struct firmware_cache *fwc = &fw_cache;
202
203 list_for_each_entry(tmp, &fwc->head, list)
204 if (!strcmp(tmp->fw_name, fw_name))
205 return tmp;
206 return NULL;
207}
208
/* Returns 1 for batching firmware requests with the same name */
/*
 * Find or create the fw_priv for @fw_name under fwc->lock.
 *
 * Return values:
 *   1       - an existing entry was found; its refcount was bumped and the
 *             caller should wait for the in-flight request (batching).
 *   0       - a fresh entry was allocated (and, unless FW_OPT_NOCACHE,
 *             linked into the cache list).
 *   -ENOMEM - allocation failed.
 */
static int alloc_lookup_fw_priv(const char *fw_name,
				struct firmware_cache *fwc,
				struct fw_priv **fw_priv, void *dbuf,
				size_t size, enum fw_opt opt_flags)
{
	struct fw_priv *tmp;

	spin_lock(&fwc->lock);
	if (!(opt_flags & FW_OPT_NOCACHE)) {
		tmp = __lookup_fw_priv(fw_name);
		if (tmp) {
			kref_get(&tmp->ref);
			spin_unlock(&fwc->lock);
			*fw_priv = tmp;
			pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
			return 1;
		}
	}

	tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
	if (tmp) {
		INIT_LIST_HEAD(&tmp->list);
		/* NOCACHE requests stay off the cache list entirely */
		if (!(opt_flags & FW_OPT_NOCACHE))
			list_add(&tmp->list, &fwc->head);
	}
	spin_unlock(&fwc->lock);

	*fw_priv = tmp;

	return tmp ? 0 : -ENOMEM;
}
241
/*
 * kref release callback for fw_priv.  Called by kref_put() in
 * free_fw_priv() with fwc->lock held; unlinks the entry and then drops
 * the lock (hence the __releases annotation) before freeing memory.
 */
static void __free_fw_priv(struct kref *ref)
	__releases(&fwc->lock)
{
	struct fw_priv *fw_priv = to_fw_priv(ref);
	struct firmware_cache *fwc = fw_priv->fwc;

	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
		 (unsigned int)fw_priv->size);

	list_del(&fw_priv->list);
	spin_unlock(&fwc->lock);

	/* Paged buffers own their pages; vmalloc'd data is only freed when
	 * it was not a caller-preallocated buffer (allocated_size == 0). */
	if (fw_is_paged_buf(fw_priv))
		fw_free_paged_buf(fw_priv);
	else if (!fw_priv->allocated_size)
		vfree(fw_priv->data);

	kfree_const(fw_priv->fw_name);
	kfree(fw_priv);
}
263
/*
 * Drop one reference on @fw_priv.  The lock is taken here and either
 * released by __free_fw_priv() (last reference) or here (otherwise).
 */
static void free_fw_priv(struct fw_priv *fw_priv)
{
	struct firmware_cache *fwc = fw_priv->fwc;
	spin_lock(&fwc->lock);
	if (!kref_put(&fw_priv->ref, __free_fw_priv))
		spin_unlock(&fwc->lock);
}
271
#ifdef CONFIG_FW_LOADER_PAGED_BUF
/* True when the image lives in a page array rather than a flat buffer. */
bool fw_is_paged_buf(struct fw_priv *fw_priv)
{
	return fw_priv->is_paged_buf;
}
277
/* Unmap and release the page array backing a paged firmware buffer. */
void fw_free_paged_buf(struct fw_priv *fw_priv)
{
	int i;

	if (!fw_priv->pages)
		return;

	/* Drop the vmap()ed view before freeing the underlying pages. */
	vunmap(fw_priv->data);

	for (i = 0; i < fw_priv->nr_pages; i++)
		__free_page(fw_priv->pages[i]);
	kvfree(fw_priv->pages);
	fw_priv->pages = NULL;
	fw_priv->page_array_size = 0;
	fw_priv->nr_pages = 0;
}
294
/*
 * Ensure the paged buffer holds at least @pages_needed pages, growing the
 * pointer array (at least doubling) and allocating pages as required.
 * Returns 0 or -ENOMEM; on failure, pages allocated so far remain for the
 * caller to release via fw_free_paged_buf().
 */
int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed)
{
	/* If the array of pages is too small, grow it */
	if (fw_priv->page_array_size < pages_needed) {
		int new_array_size = max(pages_needed,
					 fw_priv->page_array_size * 2);
		struct page **new_pages;

		new_pages = kvmalloc_array(new_array_size, sizeof(void *),
					   GFP_KERNEL);
		if (!new_pages)
			return -ENOMEM;
		/* Carry over existing page pointers, zero the new tail. */
		memcpy(new_pages, fw_priv->pages,
		       fw_priv->page_array_size * sizeof(void *));
		memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
		       (new_array_size - fw_priv->page_array_size));
		kvfree(fw_priv->pages);
		fw_priv->pages = new_pages;
		fw_priv->page_array_size = new_array_size;
	}

	while (fw_priv->nr_pages < pages_needed) {
		fw_priv->pages[fw_priv->nr_pages] =
			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

		if (!fw_priv->pages[fw_priv->nr_pages])
			return -ENOMEM;
		fw_priv->nr_pages++;
	}

	return 0;
}
327
/*
 * Map the collected pages into one contiguous, read-only virtual range and
 * publish it as fw_priv->data.  Returns 0 or -ENOMEM.
 */
int fw_map_paged_buf(struct fw_priv *fw_priv)
{
	/* one pages buffer should be mapped/unmapped only once */
	if (!fw_priv->pages)
		return 0;

	/* Drop any previous (e.g. writable) mapping before remapping RO. */
	vunmap(fw_priv->data);
	fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
			     PAGE_KERNEL_RO);
	if (!fw_priv->data)
		return -ENOMEM;

	return 0;
}
#endif
343
/*
 * XZ-compressed firmware support
 */
#ifdef CONFIG_FW_LOADER_COMPRESS
/* show an error and return the standard error code */
static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret)
{
	/* XZ_STREAM_END is the only fully-successful outcome. */
	if (xz_ret != XZ_STREAM_END) {
		dev_warn(dev, "xz decompression failed (xz_ret=%d)\n", xz_ret);
		return xz_ret == XZ_MEM_ERROR ? -ENOMEM : -EINVAL;
	}
	return 0;
}
357
/* single-shot decompression onto the pre-allocated buffer */
static int fw_decompress_xz_single(struct device *dev, struct fw_priv *fw_priv,
				   size_t in_size, const void *in_buffer)
{
	struct xz_dec *xz_dec;
	struct xz_buf xz_buf;
	enum xz_ret xz_ret;

	/* XZ_SINGLE: whole input and output available at once, no looping. */
	xz_dec = xz_dec_init(XZ_SINGLE, (u32)-1);
	if (!xz_dec)
		return -ENOMEM;

	xz_buf.in_size = in_size;
	xz_buf.in = in_buffer;
	xz_buf.in_pos = 0;
	xz_buf.out_size = fw_priv->allocated_size;
	xz_buf.out = fw_priv->data;
	xz_buf.out_pos = 0;

	xz_ret = xz_dec_run(xz_dec, &xz_buf);
	xz_dec_end(xz_dec);

	/* Record the decompressed length even on error paths. */
	fw_priv->size = xz_buf.out_pos;
	return fw_decompress_xz_error(dev, xz_ret);
}
383
/* decompression on paged buffer and map it */
/*
 * Decompress page by page into a growing paged buffer (output size unknown
 * in advance), then map the result contiguously via fw_map_paged_buf().
 */
static int fw_decompress_xz_pages(struct device *dev, struct fw_priv *fw_priv,
				  size_t in_size, const void *in_buffer)
{
	struct xz_dec *xz_dec;
	struct xz_buf xz_buf;
	enum xz_ret xz_ret;
	struct page *page;
	int err = 0;

	xz_dec = xz_dec_init(XZ_DYNALLOC, (u32)-1);
	if (!xz_dec)
		return -ENOMEM;

	xz_buf.in_size = in_size;
	xz_buf.in = in_buffer;
	xz_buf.in_pos = 0;

	fw_priv->is_paged_buf = true;
	fw_priv->size = 0;
	do {
		if (fw_grow_paged_buf(fw_priv, fw_priv->nr_pages + 1)) {
			err = -ENOMEM;
			goto out;
		}

		/* decompress onto the new allocated page */
		page = fw_priv->pages[fw_priv->nr_pages - 1];
		xz_buf.out = kmap(page);
		xz_buf.out_pos = 0;
		xz_buf.out_size = PAGE_SIZE;
		xz_ret = xz_dec_run(xz_dec, &xz_buf);
		kunmap(page);
		fw_priv->size += xz_buf.out_pos;
		/* partial decompression means either end or error */
		if (xz_buf.out_pos != PAGE_SIZE)
			break;
	} while (xz_ret == XZ_OK);

	err = fw_decompress_xz_error(dev, xz_ret);
	if (!err)
		err = fw_map_paged_buf(fw_priv);

 out:
	xz_dec_end(xz_dec);
	return err;
}
431
432static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
433 size_t in_size, const void *in_buffer)
434{
435 /* if the buffer is pre-allocated, we can perform in single-shot mode */
436 if (fw_priv->data)
437 return fw_decompress_xz_single(dev, fw_priv, in_size, in_buffer);
438 else
439 return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer);
440}
441#endif /* CONFIG_FW_LOADER_COMPRESS */
442
/* direct firmware loading support */
/* Search paths, tried in order; slot 0 is the optional user-set override. */
static char fw_path_para[256];
static const char * const fw_path[] = {
	fw_path_para,
	"/lib/firmware/updates/" UTS_RELEASE,
	"/lib/firmware/updates",
	"/lib/firmware/" UTS_RELEASE,
	"/lib/firmware"
};

/*
 * Typical usage is that passing 'firmware_class.path=$CUSTOMIZED_PATH'
 * from kernel command line because firmware_class is generally built in
 * kernel instead of module.
 */
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
460
/*
 * Try each fw_path[] entry for "<name><suffix>" and load the first match.
 *
 * When @decompress is NULL and fw_priv->data is already set, the file is
 * read straight into the caller's preallocated buffer; otherwise the file
 * contents are handed to @decompress (which owns populating fw_priv) and
 * the raw input buffer is discarded afterwards.
 *
 * Returns 0 on success (and marks the load done), -ENOENT if no path
 * matched, or another negative errno.
 */
static int
fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
			   const char *suffix,
			   int (*decompress)(struct device *dev,
					     struct fw_priv *fw_priv,
					     size_t in_size,
					     const void *in_buffer))
{
	loff_t size;
	int i, len;
	int rc = -ENOENT;
	char *path;
	enum kernel_read_file_id id = READING_FIRMWARE;
	size_t msize = INT_MAX;
	void *buffer = NULL;

	/* Already populated data member means we're loading into a buffer */
	if (!decompress && fw_priv->data) {
		buffer = fw_priv->data;
		id = READING_FIRMWARE_PREALLOC_BUFFER;
		msize = fw_priv->allocated_size;
	}

	path = __getname();
	if (!path)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
		/* skip the unset customized path */
		if (!fw_path[i][0])
			continue;

		len = snprintf(path, PATH_MAX, "%s/%s%s",
			       fw_path[i], fw_priv->fw_name, suffix);
		if (len >= PATH_MAX) {
			rc = -ENAMETOOLONG;
			break;
		}

		fw_priv->size = 0;
		rc = kernel_read_file_from_path(path, &buffer, &size,
						msize, id);
		if (rc) {
			/* missing file in one path is expected; keep trying */
			if (rc != -ENOENT)
				dev_warn(device, "loading %s failed with error %d\n",
					 path, rc);
			else
				dev_dbg(device, "loading %s failed for no such file or directory.\n",
					 path);
			continue;
		}
		if (decompress) {
			dev_dbg(device, "f/w decompressing %s\n",
				fw_priv->fw_name);
			rc = decompress(device, fw_priv, size, buffer);
			/* discard the superfluous original content */
			vfree(buffer);
			buffer = NULL;
			if (rc) {
				fw_free_paged_buf(fw_priv);
				continue;
			}
		} else {
			dev_dbg(device, "direct-loading %s\n",
				fw_priv->fw_name);
			/* kernel_read_file allocated the buffer for us */
			if (!fw_priv->data)
				fw_priv->data = buffer;
			fw_priv->size = size;
		}
		fw_state_done(fw_priv);
		break;
	}
	__putname(path);

	return rc;
}
537
538/* firmware holds the ownership of pages */
539static void firmware_free_data(const struct firmware *fw)
540{
541 /* Loaded directly? */
542 if (!fw->priv) {
543 vfree(fw->data);
544 return;
545 }
546 free_fw_priv(fw->priv);
547}
548
/* store the pages buffer info firmware from buf */
/* Publish fw_priv's buffer (data/size/pages) through the public struct. */
static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
{
	fw->priv = fw_priv;
#ifdef CONFIG_FW_LOADER_USER_HELPER
	fw->pages = fw_priv->pages;
#endif
	fw->size = fw_priv->size;
	fw->data = fw_priv->data;

	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
		 (unsigned int)fw_priv->size);
}
563
#ifdef CONFIG_PM_SLEEP
/* devres release callback: free the duplicated firmware name string. */
static void fw_name_devm_release(struct device *dev, void *res)
{
	struct fw_name_devm *fwn = res;

	if (fwn->magic == (unsigned long)&fw_cache)
		pr_debug("%s: fw_name-%s devm-%p released\n",
			 __func__, fwn->name, res);
	kfree_const(fwn->name);
}
574
575static int fw_devm_match(struct device *dev, void *res,
576 void *match_data)
577{
578 struct fw_name_devm *fwn = res;
579
580 return (fwn->magic == (unsigned long)&fw_cache) &&
581 !strcmp(fwn->name, match_data);
582}
583
/* Find the devres entry recording @name for @dev, or NULL. */
static struct fw_name_devm *fw_find_devm_name(struct device *dev,
					      const char *name)
{
	struct fw_name_devm *fwn;

	fwn = devres_find(dev, fw_name_devm_release,
			  fw_devm_match, (void *)name);
	return fwn;
}
593
/* True if @name is already registered for auto-caching on @dev. */
static bool fw_cache_is_setup(struct device *dev, const char *name)
{
	return fw_find_devm_name(dev, name) != NULL;
}
604
/* add firmware name into devres list */
/*
 * Record @name on @dev's devres list so the firmware is re-cached across
 * suspend/resume.  Idempotent; returns 0 or -ENOMEM.
 */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	struct fw_name_devm *fwn;

	if (fw_cache_is_setup(dev, name))
		return 0;

	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
			   GFP_KERNEL);
	if (!fwn)
		return -ENOMEM;
	fwn->name = kstrdup_const(name, GFP_KERNEL);
	if (!fwn->name) {
		devres_free(fwn);
		return -ENOMEM;
	}

	/* Magic lets release/match callbacks recognise our entries. */
	fwn->magic = (unsigned long)&fw_cache;
	devres_add(dev, fwn);

	return 0;
}
#else
/* Stubs: without CONFIG_PM_SLEEP no per-device firmware cache exists. */
static bool fw_cache_is_setup(struct device *dev, const char *name)
{
	return false;
}

static int fw_add_devm_name(struct device *dev, const char *name)
{
	return 0;
}
#endif
639
/*
 * Finalise a successful load: register the name for caching (unless
 * NOCACHE/no-uevent), piggyback on an in-progress cache run, and publish
 * the buffer to the caller's struct firmware.  Returns 0 or -errno.
 */
int assign_fw(struct firmware *fw, struct device *device,
	      enum fw_opt opt_flags)
{
	struct fw_priv *fw_priv = fw->priv;
	int ret;

	mutex_lock(&fw_lock);
	/* An empty or aborted load cannot be handed to the caller. */
	if (!fw_priv->size || fw_state_is_aborted(fw_priv)) {
		mutex_unlock(&fw_lock);
		return -ENOENT;
	}

	/*
	 * add firmware name into devres list so that we can auto cache
	 * and uncache firmware for device.
	 *
	 * device may has been deleted already, but the problem
	 * should be fixed in devres or driver core.
	 */
	/* don't cache firmware handled without uevent */
	if (device && (opt_flags & FW_OPT_UEVENT) &&
	    !(opt_flags & FW_OPT_NOCACHE)) {
		ret = fw_add_devm_name(device, fw_priv->fw_name);
		if (ret) {
			mutex_unlock(&fw_lock);
			return ret;
		}
	}

	/*
	 * After caching firmware image is started, let it piggyback
	 * on request firmware.
	 */
	if (!(opt_flags & FW_OPT_NOCACHE) &&
	    fw_priv->fwc->state == FW_LOADER_START_CACHE) {
		/* the cache list now holds its own reference */
		if (fw_cache_piggyback_on_request(fw_priv->fw_name))
			kref_get(&fw_priv->ref);
	}

	/* pass the pages buffer to driver at the last minute */
	fw_set_page_data(fw_priv, fw);
	mutex_unlock(&fw_lock);
	return 0;
}
684
/* prepare firmware and firmware_buf structs;
 * return 0 if a firmware is already assigned, 1 if need to load one,
 * or a negative error code
 */
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
			  struct device *device, void *dbuf, size_t size,
			  enum fw_opt opt_flags)
{
	struct firmware *firmware;
	struct fw_priv *fw_priv;
	int ret;

	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
	if (!firmware) {
		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
			__func__);
		return -ENOMEM;
	}

	/* Builtin images short-circuit the whole loading machinery. */
	if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
		dev_dbg(device, "using built-in %s\n", name);
		return 0; /* assigned */
	}

	ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
				   opt_flags);

	/*
	 * bind with 'priv' now to avoid warning in failure path
	 * of requesting firmware.
	 */
	firmware->priv = fw_priv;

	/* ret > 0: batched onto an in-flight request - wait for it */
	if (ret > 0) {
		ret = fw_state_wait(fw_priv);
		if (!ret) {
			fw_set_page_data(fw_priv, firmware);
			return 0; /* assigned */
		}
	}

	if (ret < 0)
		return ret;
	return 1; /* need to load */
}
731
/*
 * Batched requests need only one wake, we need to do this step last due to the
 * fallback mechanism. The buf is protected with kref_get(), and it won't be
 * released until the last user calls release_firmware().
 *
 * Failed batched requests are possible as well, in such cases we just share
 * the struct fw_priv and won't release it until all requests are woken
 * and have gone through this same path.
 */
static void fw_abort_batch_reqs(struct firmware *fw)
{
	struct fw_priv *fw_priv;

	/* Loaded directly? */
	if (!fw || !fw->priv)
		return;

	fw_priv = fw->priv;
	/* fw_lock serialises the aborted-state check against assign_fw() */
	mutex_lock(&fw_lock);
	if (!fw_state_is_aborted(fw_priv))
		fw_state_aborted(fw_priv);
	mutex_unlock(&fw_lock);
}
755
/* called from request_firmware() and request_firmware_work_func() */
/*
 * Core request path: prepare/batch, try direct filesystem load (plain then
 * .xz if compression support is built in), fall back to sysfs loading when
 * permitted, and finally assign or abort-and-release the firmware.
 */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
		  struct device *device, void *buf, size_t size,
		  enum fw_opt opt_flags)
{
	struct firmware *fw = NULL;
	int ret;

	if (!firmware_p)
		return -EINVAL;

	if (!name || name[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	ret = _request_firmware_prepare(&fw, name, device, buf, size,
					opt_flags);
	if (ret <= 0) /* error or already assigned */
		goto out;

	ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
#ifdef CONFIG_FW_LOADER_COMPRESS
	/* only retry with the .xz suffix when the plain file was absent */
	if (ret == -ENOENT)
		ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
						 fw_decompress_xz);
#endif

	if (ret) {
		if (!(opt_flags & FW_OPT_NO_WARN))
			dev_warn(device,
				 "Direct firmware load for %s failed with error %d\n",
				 name, ret);
		ret = firmware_fallback_sysfs(fw, name, device, opt_flags, ret);
	} else
		ret = assign_fw(fw, device, opt_flags);

 out:
	if (ret < 0) {
		fw_abort_batch_reqs(fw);
		release_firmware(fw);
		fw = NULL;
	}

	*firmware_p = fw;
	return ret;
}
804
/**
 * request_firmware() - send firmware request and wait for it
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * @firmware_p will be used to return a firmware image by the name
 * of @name for device @device.
 *
 * Should be called from user context where sleeping is allowed.
 *
 * @name will be used as $FIRMWARE in the uevent environment and
 * should be distinctive enough not to be confused with any other
 * firmware image for this or any other device.
 *
 * Caller must hold the reference count of @device.
 *
 * The function can be called safely inside device's suspend and
 * resume callback.
 **/
int
request_firmware(const struct firmware **firmware_p, const char *name,
		 struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, NULL, 0,
				FW_OPT_UEVENT);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware);
839
/**
 * firmware_request_nowarn() - request for an optional fw module
 * @firmware: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * This function is similar in behaviour to request_firmware(), except
 * it doesn't produce warning messages when the file is not found.
 * The sysfs fallback mechanism is enabled if direct filesystem lookup fails;
 * however, failures to find the firmware file with it are still
 * suppressed. It is therefore up to the driver to check for the return value
 * of this call and to decide when to inform the users of errors.
 **/
int firmware_request_nowarn(const struct firmware **firmware, const char *name,
			    struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware, name, device, NULL, 0,
				FW_OPT_UEVENT | FW_OPT_NO_WARN);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL_GPL(firmware_request_nowarn);
866
/**
 * request_firmware_direct() - load firmware directly without usermode helper
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * This function works pretty much like request_firmware(), but this doesn't
 * fall back to usermode helper even if the firmware couldn't be loaded
 * directly from fs. Hence it's useful for loading optional firmwares, which
 * aren't always present, without extra long timeouts of udev.
 **/
int request_firmware_direct(const struct firmware **firmware_p,
			    const char *name, struct device *device)
{
	int ret;

	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, NULL, 0,
				FW_OPT_UEVENT | FW_OPT_NO_WARN |
				FW_OPT_NOFALLBACK);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL_GPL(request_firmware_direct);
891
/**
 * firmware_request_cache() - cache firmware for suspend so resume can use it
 * @name: name of firmware file
 * @device: device for which firmware should be cached for
 *
 * There are some devices with an optimization that enables the device to not
 * require loading firmware on system reboot. This optimization may still
 * require the firmware present on resume from suspend. This routine can be
 * used to ensure the firmware is present on resume from suspend in these
 * situations. This helper is not compatible with drivers which use
 * request_firmware_into_buf() or request_firmware_nowait() with no uevent set.
 **/
int firmware_request_cache(struct device *device, const char *name)
{
	int ret;

	/* fw_lock protects the devres name registration */
	mutex_lock(&fw_lock);
	ret = fw_add_devm_name(device, name);
	mutex_unlock(&fw_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(firmware_request_cache);
915
/**
 * request_firmware_into_buf() - load firmware into a previously allocated buffer
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded and DMA region allocated
 * @buf: address of buffer to load firmware into
 * @size: size of buffer
 *
 * This function works pretty much like request_firmware(), but it doesn't
 * allocate a buffer to hold the firmware data. Instead, the firmware
 * is loaded directly into the buffer pointed to by @buf and the @firmware_p
 * data member is pointed at @buf.
 *
 * This function doesn't cache firmware either.
 */
int
request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
			  struct device *device, void *buf, size_t size)
{
	int ret;

	/* incompatible with the suspend/resume cache (see kernel-doc above) */
	if (fw_cache_is_setup(device, name))
		return -EOPNOTSUPP;

	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, buf, size,
				FW_OPT_UEVENT | FW_OPT_NOCACHE);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware_into_buf);
947
/**
 * release_firmware() - release the resource associated with a firmware image
 * @fw: firmware resource to release
 *
 * Safe to call with NULL.  Builtin images own no separate data, so only
 * the wrapper struct is freed for them.
 **/
void release_firmware(const struct firmware *fw)
{
	if (!fw)
		return;

	if (!fw_is_builtin_firmware(fw))
		firmware_free_data(fw);
	kfree(fw);
}
EXPORT_SYMBOL(release_firmware);
961
/* Async support */
/* Work item carrying one request_firmware_nowait() request. */
struct firmware_work {
	struct work_struct work;
	struct module *module;		/* pinned for the lifetime of the work */
	const char *name;		/* kstrdup_const'd image name */
	struct device *device;		/* held via get_device() */
	void *context;			/* opaque, passed to @cont */
	void (*cont)(const struct firmware *fw, void *context);
	enum fw_opt opt_flags;
};
972
/*
 * Workqueue handler for request_firmware_nowait(): perform the request,
 * invoke the completion callback, then drop all references taken when the
 * work was queued.
 */
static void request_firmware_work_func(struct work_struct *work)
{
	struct firmware_work *fw_work;
	const struct firmware *fw;

	fw_work = container_of(work, struct firmware_work, work);

	_request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0,
			  fw_work->opt_flags);
	/* fw may be NULL on failure; the callback must cope with that */
	fw_work->cont(fw, fw_work->context);
	put_device(fw_work->device); /* taken in request_firmware_nowait() */

	module_put(fw_work->module);
	kfree_const(fw_work->name);
	kfree(fw_work);
}
989
990/**
991 * request_firmware_nowait() - asynchronous version of request_firmware
992 * @module: module requesting the firmware
993 * @uevent: sends uevent to copy the firmware image if this flag
994 * is non-zero else the firmware copy must be done manually.
995 * @name: name of firmware file
996 * @device: device for which firmware is being loaded
997 * @gfp: allocation flags
998 * @context: will be passed over to @cont, and
999 * @fw may be %NULL if firmware request fails.
1000 * @cont: function will be called asynchronously when the firmware
1001 * request is over.
1002 *
1003 * Caller must hold the reference count of @device.
1004 *
1005 * Asynchronous variant of request_firmware() for user contexts:
1006 * - sleep for as small periods as possible since it may
1007 * increase kernel boot time of built-in device drivers
1008 * requesting firmware in their ->probe() methods, if
1009 * @gfp is GFP_KERNEL.
1010 *
1011 * - can't sleep at all if @gfp is GFP_ATOMIC.
1012 **/
1013int
1014request_firmware_nowait(
1015 struct module *module, bool uevent,
1016 const char *name, struct device *device, gfp_t gfp, void *context,
1017 void (*cont)(const struct firmware *fw, void *context))
1018{
1019 struct firmware_work *fw_work;
1020
1021 fw_work = kzalloc(sizeof(struct firmware_work), gfp);
1022 if (!fw_work)
1023 return -ENOMEM;
1024
1025 fw_work->module = module;
1026 fw_work->name = kstrdup_const(name, gfp);
1027 if (!fw_work->name) {
1028 kfree(fw_work);
1029 return -ENOMEM;
1030 }
1031 fw_work->device = device;
1032 fw_work->context = context;
1033 fw_work->cont = cont;
1034 fw_work->opt_flags = FW_OPT_NOWAIT |
1035 (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
1036
1037 if (!uevent && fw_cache_is_setup(device, name)) {
1038 kfree_const(fw_work->name);
1039 kfree(fw_work);
1040 return -EOPNOTSUPP;
1041 }
1042
1043 if (!try_module_get(module)) {
1044 kfree_const(fw_work->name);
1045 kfree(fw_work);
1046 return -EFAULT;
1047 }
1048
1049 get_device(fw_work->device);
1050 INIT_WORK(&fw_work->work, request_firmware_work_func);
1051 schedule_work(&fw_work->work);
1052 return 0;
1053}
1054EXPORT_SYMBOL(request_firmware_nowait);
1055
1056#ifdef CONFIG_PM_SLEEP
1057static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
1058
/**
 * cache_firmware() - cache one firmware image in kernel memory space
 * @fw_name: the firmware image name
 *
 * Cache firmware in kernel memory so that drivers can use it when
 * system isn't ready for them to request firmware image from userspace.
 * Once it returns successfully, driver can use request_firmware or its
 * nowait version to get the cached firmware without any interacting
 * with userspace
 *
 * Return 0 if the firmware image has been cached successfully
 * Return !0 otherwise
 *
 */
static int cache_firmware(const char *fw_name)
{
	int ret;
	const struct firmware *fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	ret = request_firmware(&fw, fw_name, NULL);
	/*
	 * Deliberately kfree() only the wrapper, NOT release_firmware():
	 * that keeps the fw_priv reference held so the image stays in
	 * fw_cache until uncache_firmware() drops it.
	 */
	if (!ret)
		kfree(fw);

	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);

	return ret;
}
1088
1089static struct fw_priv *lookup_fw_priv(const char *fw_name)
1090{
1091 struct fw_priv *tmp;
1092 struct firmware_cache *fwc = &fw_cache;
1093
1094 spin_lock(&fwc->lock);
1095 tmp = __lookup_fw_priv(fw_name);
1096 spin_unlock(&fwc->lock);
1097
1098 return tmp;
1099}
1100
/**
 * uncache_firmware() - remove one cached firmware image
 * @fw_name: the firmware image name
 *
 * Uncache one firmware image which has been cached successfully
 * before.
 *
 * Return 0 if the firmware cache has been removed successfully
 * Return !0 otherwise
 *
 */
static int uncache_firmware(const char *fw_name)
{
	struct fw_priv *fw_priv;
	struct firmware fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	/* A built-in image is always available; report success, no cache held. */
	if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
		return 0;

	fw_priv = lookup_fw_priv(fw_name);
	if (fw_priv) {
		/* Drop the reference that was pinning this image in the cache. */
		free_fw_priv(fw_priv);
		return 0;
	}

	/* Name was never cached (or has already been uncached). */
	return -EINVAL;
}
1130
1131static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
1132{
1133 struct fw_cache_entry *fce;
1134
1135 fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
1136 if (!fce)
1137 goto exit;
1138
1139 fce->name = kstrdup_const(name, GFP_ATOMIC);
1140 if (!fce->name) {
1141 kfree(fce);
1142 fce = NULL;
1143 goto exit;
1144 }
1145exit:
1146 return fce;
1147}
1148
1149static int __fw_entry_found(const char *name)
1150{
1151 struct firmware_cache *fwc = &fw_cache;
1152 struct fw_cache_entry *fce;
1153
1154 list_for_each_entry(fce, &fwc->fw_names, list) {
1155 if (!strcmp(fce->name, name))
1156 return 1;
1157 }
1158 return 0;
1159}
1160
/*
 * Record @name on the cache name list so it will be (re)cached across
 * suspend.  Returns 1 if a new entry was added, 0 if the name was
 * already listed or the entry allocation failed.
 */
static int fw_cache_piggyback_on_request(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;
	int ret = 0;

	spin_lock(&fwc->name_lock);
	if (__fw_entry_found(name))
		goto found;

	/* alloc_fw_cache_entry() uses GFP_ATOMIC: we hold name_lock here. */
	fce = alloc_fw_cache_entry(name);
	if (fce) {
		ret = 1;
		list_add(&fce->list, &fwc->fw_names);
		pr_debug("%s: fw: %s\n", __func__, name);
	}
found:
	spin_unlock(&fwc->name_lock);
	return ret;
}
1181
/* Free a cache entry and the name string duplicated for it. */
static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree_const(fce->name);
	kfree(fce);
}
1187
/*
 * Async worker: cache the firmware named by the fw_cache_entry passed
 * as @fw_entry.  On failure the entry is unlinked from the cache name
 * list (under name_lock) and freed, so only successfully cached names
 * stay listed for later uncaching.
 */
static void __async_dev_cache_fw_image(void *fw_entry,
				       async_cookie_t cookie)
{
	struct fw_cache_entry *fce = fw_entry;
	struct firmware_cache *fwc = &fw_cache;
	int ret;

	ret = cache_firmware(fce->name);
	if (ret) {
		spin_lock(&fwc->name_lock);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		free_fw_cache_entry(fce);
	}
}
1204
1205/* called with dev->devres_lock held */
1206static void dev_create_fw_entry(struct device *dev, void *res,
1207 void *data)
1208{
1209 struct fw_name_devm *fwn = res;
1210 const char *fw_name = fwn->name;
1211 struct list_head *head = data;
1212 struct fw_cache_entry *fce;
1213
1214 fce = alloc_fw_cache_entry(fw_name);
1215 if (fce)
1216 list_add(&fce->list, head);
1217}
1218
1219static int devm_name_match(struct device *dev, void *res,
1220 void *match_data)
1221{
1222 struct fw_name_devm *fwn = res;
1223 return (fwn->magic == (unsigned long)match_data);
1224}
1225
/*
 * Collect every firmware name recorded in @dev's devres list and
 * schedule async caching for each name not already on the cache list.
 */
static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	/* Build a private todo list from the device's fw_name_devm entries. */
	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		spin_lock(&fwc->name_lock);
		/* only one cache entry for one firmware */
		if (!__fw_entry_found(fce->name)) {
			list_add(&fce->list, &fwc->fw_names);
		} else {
			/* Duplicate: drop it and skip scheduling below. */
			free_fw_cache_entry(fce);
			fce = NULL;
		}
		spin_unlock(&fwc->name_lock);

		/*
		 * The actual caching runs asynchronously; it is awaited in
		 * device_cache_fw_images() via fw_cache_domain.
		 */
		if (fce)
			async_schedule_domain(__async_dev_cache_fw_image,
					      (void *)fce,
					      &fw_cache_domain);
	}
}
1256
/*
 * Uncache and free every entry on the cache name list.  Each entry is
 * unlinked while name_lock is held, then the lock is dropped for the
 * uncache/free work and reacquired to fetch the next entry, so every
 * entry is processed exactly once.
 */
static void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				struct fw_cache_entry, list);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}
1276
/**
 * device_cache_fw_images() - cache devices' firmware
 *
 * If one device called request_firmware or its nowait version
 * successfully before, the firmware names are recorded into the
 * device's devres link list, so device_cache_fw_images can call
 * cache_firmware() to cache these firmwares for the device,
 * then the device driver can load its firmwares easily at
 * time when system is not ready to complete loading firmware.
 */
static void device_cache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	/* NOTE(review): 'wait' looks unused in this function — confirm. */
	DEFINE_WAIT(wait);

	pr_debug("%s\n", __func__);

	/* cancel uncache work */
	cancel_delayed_work_sync(&fwc->work);

	/* Switch the fallback loader to its caching timeout for this pass. */
	fw_fallback_set_cache_timeout();

	mutex_lock(&fw_lock);
	fwc->state = FW_LOADER_START_CACHE;
	dpm_for_each_dev(NULL, dev_cache_fw_image);
	mutex_unlock(&fw_lock);

	/* wait for completion of caching firmware for all devices */
	async_synchronize_full_domain(&fw_cache_domain);

	fw_fallback_set_default_timeout();
}
1309
/**
 * device_uncache_fw_images() - uncache devices' firmware
 *
 * uncache all firmwares which have been cached successfully
 * by device_cache_fw_images earlier
 */
static void device_uncache_fw_images(void)
{
	pr_debug("%s\n", __func__);
	__device_uncache_fw_images();
}
1321
/* Delayed-work handler: simply runs device_uncache_fw_images(). */
static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}
1326
/**
 * device_uncache_fw_images_delay() - uncache devices' firmwares
 * @delay: number of milliseconds to delay uncache device firmwares
 *
 * uncache all devices' firmwares which have been cached successfully
 * by device_cache_fw_images after @delay milliseconds.
 */
static void device_uncache_fw_images_delay(unsigned long delay)
{
	queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
			   msecs_to_jiffies(delay));
}
1339
/*
 * PM notifier: before suspend/hibernate, abort pending fallback
 * requests and cache device firmware so drivers can reload it while
 * userspace is unavailable; after resume, reset the loader state and
 * schedule a delayed uncache.  Always returns 0, letting the PM
 * transition proceed.
 */
static int fw_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		/*
		 * kill pending fallback requests with a custom fallback
		 * to avoid stalling suspend.
		 */
		kill_pending_fw_fallback_reqs(true);
		device_cache_fw_images();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/*
		 * In case that system sleep failed and syscore_suspend is
		 * not called.
		 */
		mutex_lock(&fw_lock);
		fw_cache.state = FW_LOADER_NO_CACHE;
		mutex_unlock(&fw_lock);

		/* Drop the cached images 10 seconds after resume. */
		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
		break;
	}

	return 0;
}
1372
/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;
}

/*
 * No .resume hook: the state is reset from fw_pm_notify() on the
 * PM_POST_* events instead.
 */
static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};
1383
1384static int __init register_fw_pm_ops(void)
1385{
1386 int ret;
1387
1388 spin_lock_init(&fw_cache.name_lock);
1389 INIT_LIST_HEAD(&fw_cache.fw_names);
1390
1391 INIT_DELAYED_WORK(&fw_cache.work,
1392 device_uncache_fw_images_work);
1393
1394 fw_cache.pm_notify.notifier_call = fw_pm_notify;
1395 ret = register_pm_notifier(&fw_cache.pm_notify);
1396 if (ret)
1397 return ret;
1398
1399 register_syscore_ops(&fw_syscore_ops);
1400
1401 return ret;
1402}
1403
/* Tear down in reverse: syscore ops first, then the PM notifier. */
static inline void unregister_fw_pm_ops(void)
{
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
}
#else
/* CONFIG_PM_SLEEP=n: firmware caching across suspend is compiled out. */
static int fw_cache_piggyback_on_request(const char *name)
{
	/* Never piggyback: there is no cache name list to add @name to. */
	return 0;
}
static inline int register_fw_pm_ops(void)
{
	return 0;
}
static inline void unregister_fw_pm_ops(void)
{
}
#endif
1422
/*
 * Initialize the always-present firmware cache fields; the PM_SLEEP
 * parts (name_lock, fw_names, work) are set up in register_fw_pm_ops().
 */
static void __init fw_cache_init(void)
{
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);
	fw_cache.state = FW_LOADER_NO_CACHE;
}
1429
/* Reboot notifier callback: abort fallback requests at shutdown time. */
static int fw_shutdown_notify(struct notifier_block *unused1,
			      unsigned long unused2, void *unused3)
{
	/*
	 * Kill all pending fallback requests to avoid both stalling shutdown,
	 * and avoid a deadlock with the usermode_lock.
	 */
	kill_pending_fw_fallback_reqs(false);

	return NOTIFY_DONE;
}

/* Registered in firmware_class_init() on the reboot notifier chain. */
static struct notifier_block fw_shutdown_nb = {
	.notifier_call = fw_shutdown_notify,
};
1445
1446static int __init firmware_class_init(void)
1447{
1448 int ret;
1449
1450 /* No need to unfold these on exit */
1451 fw_cache_init();
1452
1453 ret = register_fw_pm_ops();
1454 if (ret)
1455 return ret;
1456
1457 ret = register_reboot_notifier(&fw_shutdown_nb);
1458 if (ret)
1459 goto out;
1460
1461 return register_sysfs_loader();
1462
1463out:
1464 unregister_fw_pm_ops();
1465 return ret;
1466}
1467
/* Module teardown: undo everything firmware_class_init() registered. */
static void __exit firmware_class_exit(void)
{
	/*
	 * NOTE(review): teardown is not in strict reverse order of
	 * firmware_class_init() (PM ops are dropped first here);
	 * confirm no ordering dependency before changing.
	 */
	unregister_fw_pm_ops();
	unregister_reboot_notifier(&fw_shutdown_nb);
	unregister_sysfs_loader();
}

fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);