// SPDX-License-Identifier: GPL-2.0

#include <linux/types.h>
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/highmem.h>
#include <linux/umh.h>
#include <linux/sysctl.h>
#include <linux/vmalloc.h>

#include "fallback.h"
#include "firmware.h"

/*
 * firmware fallback mechanism
 */

extern struct firmware_fallback_config fw_fallback_config;

/* These getters are vetted to use int properly */
static inline int __firmware_loading_timeout(void)
{
	return fw_fallback_config.loading_timeout;
}

/* These setters are vetted to use int properly */
static void __fw_fallback_set_timeout(int timeout)
{
	fw_fallback_config.loading_timeout = timeout;
}

/*
 * Use a small loading timeout for caching devices' firmware: all of these
 * firmware images have been loaded successfully at least once, and the
 * system is ready to complete firmware loading now. The maximum firmware
 * size in current distributions is about 2 MB, so 10 seconds should be
 * enough.
 */
void fw_fallback_set_cache_timeout(void)
{
	fw_fallback_config.old_timeout = __firmware_loading_timeout();
	__fw_fallback_set_timeout(10);
}

/* Restores the timeout to the value last configured during normal operation */
void fw_fallback_set_default_timeout(void)
{
	__fw_fallback_set_timeout(fw_fallback_config.old_timeout);
}

static long firmware_loading_timeout(void)
{
	return __firmware_loading_timeout() > 0 ?
		__firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET;
}
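/*
 * Worked example (editorial illustration, not from the original source):
 * assuming the default loading_timeout of 60 seconds and HZ=250, this
 * returns 60 * 250 = 15000 jiffies; a timeout of 0 means "wait forever"
 * and yields MAX_JIFFY_OFFSET instead.
 */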

static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
{
	return __fw_state_check(fw_priv, FW_STATUS_DONE);
}

static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
{
	return __fw_state_check(fw_priv, FW_STATUS_LOADING);
}

static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
{
	return __fw_state_wait_common(fw_priv, timeout);
}

struct fw_sysfs {
	bool nowait;
	struct device dev;
	struct fw_priv *fw_priv;
	struct firmware *fw;
};

static struct fw_sysfs *to_fw_sysfs(struct device *dev)
{
	return container_of(dev, struct fw_sysfs, dev);
}

static void __fw_load_abort(struct fw_priv *fw_priv)
{
	/*
	 * There is a small window in which userspace can still write to
	 * 'loading' between the load being done/aborted and the 'loading'
	 * attribute disappearing.
	 */
	if (fw_state_is_aborted(fw_priv) || fw_sysfs_done(fw_priv))
		return;

	fw_state_aborted(fw_priv);
}

static void fw_load_abort(struct fw_sysfs *fw_sysfs)
{
	struct fw_priv *fw_priv = fw_sysfs->fw_priv;

	__fw_load_abort(fw_priv);
}

static LIST_HEAD(pending_fw_head);

void kill_pending_fw_fallback_reqs(bool only_kill_custom)
{
	struct fw_priv *fw_priv;
	struct fw_priv *next;

	mutex_lock(&fw_lock);
	list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
				 pending_list) {
		if (!fw_priv->need_uevent || !only_kill_custom)
			__fw_load_abort(fw_priv);
	}
	mutex_unlock(&fw_lock);
}

static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", __firmware_loading_timeout());
}

/**
 * firmware_timeout_store() - set number of seconds to wait for firmware
 * @class: device class pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for timeout value
 * @count: number of bytes in @buf
 *
 * Sets the number of seconds to wait for the firmware. Once this expires an
 * error will be returned to the driver and no firmware will be provided.
 *
 * Note: zero means 'wait forever'.
 **/
static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
			     const char *buf, size_t count)
{
	int tmp_loading_timeout = simple_strtol(buf, NULL, 10);

	if (tmp_loading_timeout < 0)
		tmp_loading_timeout = 0;

	__fw_fallback_set_timeout(tmp_loading_timeout);

	return count;
}
static CLASS_ATTR_RW(timeout);
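/*
 * Usage sketch (editorial, assuming the class has been registered via
 * register_sysfs_loader() below): the timeout attribute is exposed as
 * /sys/class/firmware/timeout, so an administrator can widen the window
 * for a slow userspace helper with e.g.:
 *
 *	echo 120 > /sys/class/firmware/timeout
 */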

static struct attribute *firmware_class_attrs[] = {
	&class_attr_timeout.attr,
	NULL,
};
ATTRIBUTE_GROUPS(firmware_class);

static void fw_dev_release(struct device *dev)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);

	kfree(fw_sysfs);
}

static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
		return -ENOMEM;
	if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
		return -ENOMEM;
	if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
		return -ENOMEM;

	return 0;
}
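/*
 * Illustrative uevent payload (hypothetical names, for documentation only):
 * a request for "wifi/fw.bin" issued through request_firmware_nowait() with
 * the default 60 second timeout would add roughly these variables to the
 * KOBJ_ADD event:
 *
 *	FIRMWARE=wifi/fw.bin
 *	TIMEOUT=60
 *	ASYNC=1
 */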

static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	int err = 0;

	mutex_lock(&fw_lock);
	if (fw_sysfs->fw_priv)
		err = do_firmware_uevent(fw_sysfs, env);
	mutex_unlock(&fw_lock);
	return err;
}

static struct class firmware_class = {
	.name		= "firmware",
	.class_groups	= firmware_class_groups,
	.dev_uevent	= firmware_uevent,
	.dev_release	= fw_dev_release,
};

int register_sysfs_loader(void)
{
	return class_register(&firmware_class);
}

void unregister_sysfs_loader(void)
{
	class_unregister(&firmware_class);
}

static ssize_t firmware_loading_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	int loading = 0;

	mutex_lock(&fw_lock);
	if (fw_sysfs->fw_priv)
		loading = fw_sysfs_loading(fw_sysfs->fw_priv);
	mutex_unlock(&fw_lock);

	return sprintf(buf, "%d\n", loading);
}

/**
 * firmware_loading_store() - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 * The relevant values are:
 *
 *  1: Start a load, discarding any previous partial load.
 *  0: Conclude the load and hand the data to the driver code.
 * -1: Conclude the load with an error and discard any written data.
 **/
static ssize_t firmware_loading_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t written = count;
	int loading = simple_strtol(buf, NULL, 10);

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (fw_state_is_aborted(fw_priv))
		goto out;

	switch (loading) {
	case 1:
		/* discarding any previous partial load */
		if (!fw_sysfs_done(fw_priv)) {
			fw_free_paged_buf(fw_priv);
			fw_state_start(fw_priv);
		}
		break;
	case 0:
		if (fw_sysfs_loading(fw_priv)) {
			int rc;

			/*
			 * Several loading requests may be pending on the same
			 * firmware buffer, so let all requests see the mapped
			 * 'buf->data' once the loading is completed.
			 */
			rc = fw_map_paged_buf(fw_priv);
			if (rc)
				dev_err(dev, "%s: map pages failed\n",
					__func__);
			else
				rc = security_kernel_post_read_file(NULL,
						fw_priv->data, fw_priv->size,
						READING_FIRMWARE);

			/*
			 * Same logic as fw_load_abort, only the DONE bit
			 * is ignored and we set ABORT only on failure.
			 */
			if (rc) {
				fw_state_aborted(fw_priv);
				written = rc;
			} else {
				fw_state_done(fw_priv);
			}
			break;
		}
		/* fallthrough */
	default:
		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		/* fallthrough */
	case -1:
		fw_load_abort(fw_sysfs);
		break;
	}
out:
	mutex_unlock(&fw_lock);
	return written;
}

static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
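/*
 * Typical userspace sequence (a sketch; $DEVPATH comes from the uevent and
 * the image path is only an example):
 *
 *	echo 1 > /sys/$DEVPATH/loading
 *	cat /lib/firmware/$FIRMWARE > /sys/$DEVPATH/data
 *	echo 0 > /sys/$DEVPATH/loading
 *
 * Writing -1 to 'loading' instead aborts the request and the waiting driver
 * sees an error.
 */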

static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
			     loff_t offset, size_t count, bool read)
{
	if (read)
		memcpy(buffer, fw_priv->data + offset, count);
	else
		memcpy(fw_priv->data + offset, buffer, count);
}

static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
			loff_t offset, size_t count, bool read)
{
	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE-1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(fw_priv->pages[page_nr]);

		if (read)
			memcpy(buffer, page_data + page_ofs, page_cnt);
		else
			memcpy(page_data + page_ofs, buffer, page_cnt);

		kunmap(fw_priv->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}
}

static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr,
				  char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t ret_count;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (!fw_priv || fw_sysfs_done(fw_priv)) {
		ret_count = -ENODEV;
		goto out;
	}
	if (offset > fw_priv->size) {
		ret_count = 0;
		goto out;
	}
	if (count > fw_priv->size - offset)
		count = fw_priv->size - offset;

	ret_count = count;

	if (fw_priv->data)
		firmware_rw_data(fw_priv, buffer, offset, count, true);
	else
		firmware_rw(fw_priv, buffer, offset, count, true);

out:
	mutex_unlock(&fw_lock);
	return ret_count;
}

static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
{
	int err;

	err = fw_grow_paged_buf(fw_sysfs->fw_priv,
				PAGE_ALIGN(min_size) >> PAGE_SHIFT);
	if (err)
		fw_load_abort(fw_sysfs);
	return err;
}

/**
 * firmware_data_write() - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 * Data written to the 'data' attribute will be later handed to
 * the driver as a firmware image.
 **/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr,
				   char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t retval;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (!fw_priv || fw_sysfs_done(fw_priv)) {
		retval = -ENODEV;
		goto out;
	}

	if (fw_priv->data) {
		if (offset + count > fw_priv->allocated_size) {
			retval = -ENOMEM;
			goto out;
		}
		firmware_rw_data(fw_priv, buffer, offset, count, false);
		retval = count;
	} else {
		retval = fw_realloc_pages(fw_sysfs, offset + count);
		if (retval)
			goto out;

		retval = count;
		firmware_rw(fw_priv, buffer, offset, count, false);
	}

	fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}
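/*
 * Editorial note on the two write paths above: for a normal request the
 * paged buffer grows on demand, so a helper may stream the image in chunks
 * (e.g. dd bs=4096 ... of=/sys/$DEVPATH/data, a hypothetical invocation).
 * For a request made with request_firmware_into_buf(), fw_priv->data is
 * preallocated and a write past allocated_size fails with -ENOMEM instead
 * of growing the buffer.
 */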

static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};

static struct attribute *fw_dev_attrs[] = {
	&dev_attr_loading.attr,
	NULL
};

static struct bin_attribute *fw_dev_bin_attrs[] = {
	&firmware_attr_data,
	NULL
};

static const struct attribute_group fw_dev_attr_group = {
	.attrs = fw_dev_attrs,
	.bin_attrs = fw_dev_bin_attrs,
};

static const struct attribute_group *fw_dev_attr_groups[] = {
	&fw_dev_attr_group,
	NULL
};

static struct fw_sysfs *
fw_create_instance(struct firmware *firmware, const char *fw_name,
		   struct device *device, enum fw_opt opt_flags)
{
	struct fw_sysfs *fw_sysfs;
	struct device *f_dev;

	fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
	if (!fw_sysfs) {
		fw_sysfs = ERR_PTR(-ENOMEM);
		goto exit;
	}

	fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
	fw_sysfs->fw = firmware;
	f_dev = &fw_sysfs->dev;

	device_initialize(f_dev);
	dev_set_name(f_dev, "%s", fw_name);
	f_dev->parent = device;
	f_dev->class = &firmware_class;
	f_dev->groups = fw_dev_attr_groups;
exit:
	return fw_sysfs;
}
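/*
 * Editorial note: the device initialized here is named after the firmware
 * and parented to the requesting device, so once device_add() runs in
 * fw_load_sysfs_fallback() it should appear under
 * /sys/class/firmware/<fw_name>/ with the 'loading' and 'data' attributes
 * supplied by fw_dev_attr_groups.
 */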

/**
 * fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism
 * @fw_sysfs: firmware sysfs information for the firmware to load
 * @opt_flags: flags of options, FW_OPT_*
 * @timeout: timeout to wait for the load
 *
 * In charge of constructing a sysfs fallback interface for firmware loading.
 **/
static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
				  enum fw_opt opt_flags, long timeout)
{
	int retval = 0;
	struct device *f_dev = &fw_sysfs->dev;
	struct fw_priv *fw_priv = fw_sysfs->fw_priv;

	/* fall back on userspace loading */
	if (!fw_priv->data)
		fw_priv->is_paged_buf = true;

	dev_set_uevent_suppress(f_dev, true);

	retval = device_add(f_dev);
	if (retval) {
		dev_err(f_dev, "%s: device_register failed\n", __func__);
		goto err_put_dev;
	}

	mutex_lock(&fw_lock);
	if (fw_state_is_aborted(fw_priv)) {
		mutex_unlock(&fw_lock);
		retval = -EINTR;
		goto out;
	}
	list_add(&fw_priv->pending_list, &pending_fw_head);
	mutex_unlock(&fw_lock);

	if (opt_flags & FW_OPT_UEVENT) {
		fw_priv->need_uevent = true;
		dev_set_uevent_suppress(f_dev, false);
		dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
		kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
	} else {
		timeout = MAX_JIFFY_OFFSET;
	}

	retval = fw_sysfs_wait_timeout(fw_priv, timeout);
	if (retval < 0 && retval != -ENOENT) {
		mutex_lock(&fw_lock);
		fw_load_abort(fw_sysfs);
		mutex_unlock(&fw_lock);
	}

	if (fw_state_is_aborted(fw_priv)) {
		if (retval == -ERESTARTSYS)
			retval = -EINTR;
	} else if (fw_priv->is_paged_buf && !fw_priv->data)
		retval = -ENOMEM;

out:
	device_del(f_dev);
err_put_dev:
	put_device(f_dev);
	return retval;
}
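/*
 * Rough lifecycle of the fallback device created above (an editorial
 * summary of the uevent path): device_add() publishes the interface, the
 * KOBJ_ADD uevent (or a polling helper) prompts userspace to feed 'data'
 * and flip 'loading', fw_sysfs_wait_timeout() blocks until the state
 * machine reports done/aborted or the timeout expires, and
 * device_del()/put_device() tear the interface back down.
 */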

static int fw_load_from_user_helper(struct firmware *firmware,
				    const char *name, struct device *device,
				    enum fw_opt opt_flags)
{
	struct fw_sysfs *fw_sysfs;
	long timeout;
	int ret;

	timeout = firmware_loading_timeout();
	if (opt_flags & FW_OPT_NOWAIT) {
		timeout = usermodehelper_read_lock_wait(timeout);
		if (!timeout) {
			dev_dbg(device, "firmware: %s loading timed out\n",
				name);
			return -EBUSY;
		}
	} else {
		ret = usermodehelper_read_trylock();
		if (WARN_ON(ret)) {
			dev_err(device, "firmware: %s will not be loaded\n",
				name);
			return ret;
		}
	}

	fw_sysfs = fw_create_instance(firmware, name, device, opt_flags);
	if (IS_ERR(fw_sysfs)) {
		ret = PTR_ERR(fw_sysfs);
		goto out_unlock;
	}

	fw_sysfs->fw_priv = firmware->priv;
	ret = fw_load_sysfs_fallback(fw_sysfs, opt_flags, timeout);

	if (!ret)
		ret = assign_fw(firmware, device, opt_flags);

out_unlock:
	usermodehelper_read_unlock();

	return ret;
}

static bool fw_force_sysfs_fallback(enum fw_opt opt_flags)
{
	if (fw_fallback_config.force_sysfs_fallback)
		return true;
	if (!(opt_flags & FW_OPT_USERHELPER))
		return false;
	return true;
}

static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
{
	int ret;

	if (fw_fallback_config.ignore_sysfs_fallback) {
		pr_info_once("Ignoring firmware sysfs fallback due to sysctl knob\n");
		return false;
	}

	if ((opt_flags & FW_OPT_NOFALLBACK))
		return false;

	/* Also permit LSMs and IMA to fail firmware sysfs fallback */
	ret = security_kernel_load_data(LOADING_FIRMWARE);
	if (ret < 0)
		return false;

	return fw_force_sysfs_fallback(opt_flags);
}
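/*
 * Configuration sketch (paths assumed from the firmware_config sysctl
 * table, which lives outside this file): the two knobs consulted above are
 * typically exposed as
 *
 *	/proc/sys/kernel/firmware_config/force_sysfs_fallback
 *	/proc/sys/kernel/firmware_config/ignore_sysfs_fallback
 *
 * so "echo 1 > /proc/sys/kernel/firmware_config/force_sysfs_fallback" makes
 * every request take the sysfs fallback path, mirroring
 * CONFIG_FW_LOADER_USER_HELPER_FALLBACK.
 */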

/**
 * firmware_fallback_sysfs() - use the fallback mechanism to find firmware
 * @fw: pointer to firmware image
 * @name: name of firmware file to look for
 * @device: device for which firmware is being loaded
 * @opt_flags: options to control firmware loading behaviour
 * @ret: return value from direct lookup which triggered the fallback mechanism
 *
 * This function is called if the direct lookup for the firmware failed. It
 * enables a fallback mechanism through userspace by exposing a sysfs loading
 * interface; userspace is in charge of loading the firmware through that
 * interface. The sysfs fallback mechanism may be disabled completely on a
 * system by setting the proc sysctl value ignore_sysfs_fallback to true. If
 * it is false, we check whether the internal API caller set the
 * @FW_OPT_NOFALLBACK flag, which also disables the fallback mechanism. A
 * system that wants to enforce the sysfs fallback mechanism at all times can
 * do so by setting ignore_sysfs_fallback to false and force_sysfs_fallback
 * to true. Enabling force_sysfs_fallback is functionally equivalent to
 * building a kernel with CONFIG_FW_LOADER_USER_HELPER_FALLBACK.
 **/
int firmware_fallback_sysfs(struct firmware *fw, const char *name,
			    struct device *device,
			    enum fw_opt opt_flags,
			    int ret)
{
	if (!fw_run_sysfs_fallback(opt_flags))
		return ret;

	if (!(opt_flags & FW_OPT_NO_WARN))
		dev_warn(device, "Falling back to sysfs fallback for: %s\n",
			 name);
	else
		dev_dbg(device, "Falling back to sysfs fallback for: %s\n",
			name);
	return fw_load_from_user_helper(fw, name, device, opt_flags);
}
657}