/*
 * firmware_class.c - Multi purpose firmware loading support
 *
 * Copyright (c) 2003 Manuel Estrada Sainz
 *
 * Please see Documentation/firmware_class/ for more information.
 *
 */
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/highmem.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/async.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>

#include <generated/utsrelease.h>

#include "base.h"

MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");
41
42
43
44#ifdef CONFIG_FW_LOADER
45
46extern struct builtin_fw __start_builtin_fw[];
47extern struct builtin_fw __end_builtin_fw[];
48
49static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
50 void *buf, size_t size)
51{
52 struct builtin_fw *b_fw;
53
54 for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
55 if (strcmp(name, b_fw->name) == 0) {
56 fw->size = b_fw->size;
57 fw->data = b_fw->data;
58
59 if (buf && fw->size <= size)
60 memcpy(buf, fw->data, fw->size);
61 return true;
62 }
63 }
64
65 return false;
66}
67
68static bool fw_is_builtin_firmware(const struct firmware *fw)
69{
70 struct builtin_fw *b_fw;
71
72 for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
73 if (fw->data == b_fw->data)
74 return true;
75
76 return false;
77}
78
79#else
80
81static inline bool fw_get_builtin_firmware(struct firmware *fw,
82 const char *name, void *buf,
83 size_t size)
84{
85 return false;
86}
87
88static inline bool fw_is_builtin_firmware(const struct firmware *fw)
89{
90 return false;
91}
92#endif
93
94enum {
95 FW_STATUS_LOADING,
96 FW_STATUS_DONE,
97 FW_STATUS_ABORT,
98};
99
100static int loading_timeout = 60;
101
102static inline long firmware_loading_timeout(void)
103{
104 return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
105}
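
/*
 * 'loading_timeout' is exposed (in seconds) as the class attribute
 * /sys/class/firmware/timeout, see timeout_show()/timeout_store() in the
 * user-helper code below.  A value of zero maps to MAX_JIFFY_OFFSET here,
 * so the user-helper wait effectively never times out.
 */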
106
107
108#define FW_OPT_UEVENT (1U << 0)
109#define FW_OPT_NOWAIT (1U << 1)
110#ifdef CONFIG_FW_LOADER_USER_HELPER
111#define FW_OPT_USERHELPER (1U << 2)
112#else
113#define FW_OPT_USERHELPER 0
114#endif
115#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK
116#define FW_OPT_FALLBACK FW_OPT_USERHELPER
117#else
118#define FW_OPT_FALLBACK 0
119#endif
120#define FW_OPT_NO_WARN (1U << 3)
121#define FW_OPT_NOCACHE (1U << 4)
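
/*
 * Behaviour flags passed around as 'opt_flags':
 *	FW_OPT_UEVENT     - announce the request with a uevent (and allow
 *			    the image to be device-cached)
 *	FW_OPT_NOWAIT     - asynchronous request (request_firmware_nowait())
 *	FW_OPT_USERHELPER - permit the sysfs fallback loader
 *	FW_OPT_FALLBACK   - FW_OPT_USERHELPER when the fallback is enabled
 *			    in Kconfig, 0 otherwise
 *	FW_OPT_NO_WARN    - don't warn when direct loading fails
 *	FW_OPT_NOCACHE    - never cache the image (request_firmware_into_buf())
 */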
122
struct firmware_cache {
	/* firmware_buf instances are added to the list below */
	spinlock_t lock;
	struct list_head head;
	int state;

#ifdef CONFIG_PM_SLEEP
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the list below so that the device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;
	struct list_head fw_names;

	struct delayed_work work;

	struct notifier_block pm_notify;
#endif
};
144
145struct firmware_buf {
146 struct kref ref;
147 struct list_head list;
148 struct completion completion;
149 struct firmware_cache *fwc;
150 unsigned long status;
151 void *data;
152 size_t size;
153 size_t allocated_size;
154#ifdef CONFIG_FW_LOADER_USER_HELPER
155 bool is_paged_buf;
156 bool need_uevent;
157 struct page **pages;
158 int nr_pages;
159 int page_array_size;
160 struct list_head pending_list;
161#endif
162 const char *fw_id;
163};
164
165struct fw_cache_entry {
166 struct list_head list;
167 const char *name;
168};
169
170struct fw_name_devm {
171 unsigned long magic;
172 const char *name;
173};
174
175#define to_fwbuf(d) container_of(d, struct firmware_buf, ref)
176
177#define FW_LOADER_NO_CACHE 0
178#define FW_LOADER_START_CACHE 1
179
180static int fw_cache_piggyback_on_request(const char *name);
181
182
183
184static DEFINE_MUTEX(fw_lock);
185
186static struct firmware_cache fw_cache;
187
188static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
189 struct firmware_cache *fwc,
190 void *dbuf, size_t size)
191{
192 struct firmware_buf *buf;
193
194 buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
195 if (!buf)
196 return NULL;
197
198 buf->fw_id = kstrdup_const(fw_name, GFP_ATOMIC);
199 if (!buf->fw_id) {
200 kfree(buf);
201 return NULL;
202 }
203
204 kref_init(&buf->ref);
205 buf->fwc = fwc;
206 buf->data = dbuf;
207 buf->allocated_size = size;
208 init_completion(&buf->completion);
209#ifdef CONFIG_FW_LOADER_USER_HELPER
210 INIT_LIST_HEAD(&buf->pending_list);
211#endif
212
213 pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
214
215 return buf;
216}
217
218static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
219{
220 struct firmware_buf *tmp;
221 struct firmware_cache *fwc = &fw_cache;
222
223 list_for_each_entry(tmp, &fwc->head, list)
224 if (!strcmp(tmp->fw_id, fw_name))
225 return tmp;
226 return NULL;
227}
228
229static int fw_lookup_and_allocate_buf(const char *fw_name,
230 struct firmware_cache *fwc,
231 struct firmware_buf **buf, void *dbuf,
232 size_t size)
233{
234 struct firmware_buf *tmp;
235
236 spin_lock(&fwc->lock);
237 tmp = __fw_lookup_buf(fw_name);
238 if (tmp) {
239 kref_get(&tmp->ref);
240 spin_unlock(&fwc->lock);
241 *buf = tmp;
242 return 1;
243 }
244 tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
245 if (tmp)
246 list_add(&tmp->list, &fwc->head);
247 spin_unlock(&fwc->lock);
248
249 *buf = tmp;
250
251 return tmp ? 0 : -ENOMEM;
252}
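
/*
 * fw_lookup_and_allocate_buf() returns 1 when an existing (possibly still
 * loading) buf was found and an extra reference taken, 0 when a fresh buf
 * was allocated and added to the cache list, and -ENOMEM on failure.
 * Callers use this to decide whether to wait for another requester
 * (sync_cached_firmware_buf()) or to start a load themselves.
 */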
253
254static void __fw_free_buf(struct kref *ref)
255 __releases(&fwc->lock)
256{
257 struct firmware_buf *buf = to_fwbuf(ref);
258 struct firmware_cache *fwc = buf->fwc;
259
260 pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
261 __func__, buf->fw_id, buf, buf->data,
262 (unsigned int)buf->size);
263
264 list_del(&buf->list);
265 spin_unlock(&fwc->lock);
266
267#ifdef CONFIG_FW_LOADER_USER_HELPER
268 if (buf->is_paged_buf) {
269 int i;
270 vunmap(buf->data);
271 for (i = 0; i < buf->nr_pages; i++)
272 __free_page(buf->pages[i]);
273 vfree(buf->pages);
274 } else
275#endif
276 if (!buf->allocated_size)
277 vfree(buf->data);
278 kfree_const(buf->fw_id);
279 kfree(buf);
280}
281
282static void fw_free_buf(struct firmware_buf *buf)
283{
284 struct firmware_cache *fwc = buf->fwc;
285 spin_lock(&fwc->lock);
286 if (!kref_put(&buf->ref, __fw_free_buf))
287 spin_unlock(&fwc->lock);
288}
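
/*
 * Locking convention: fw_free_buf() takes fwc->lock and kref_put() only
 * drops it again via __fw_free_buf() (annotated __releases(&fwc->lock))
 * when the last reference goes away, so unlinking the buf from the cache
 * list happens atomically with the final put.
 */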
289

/* direct firmware loading support */
static char fw_path_para[256];
static const char * const fw_path[] = {
	fw_path_para,
	"/lib/firmware/updates/" UTS_RELEASE,
	"/lib/firmware/updates",
	"/lib/firmware/" UTS_RELEASE,
	"/lib/firmware"
};

/*
 * Typical usage is passing 'firmware_class.path=$CUSTOMIZED_PATH' on the
 * kernel command line, because firmware_class is generally built into the
 * kernel instead of being a module.
 */
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
307
308static void fw_finish_direct_load(struct device *device,
309 struct firmware_buf *buf)
310{
311 mutex_lock(&fw_lock);
312 set_bit(FW_STATUS_DONE, &buf->status);
313 complete_all(&buf->completion);
314 mutex_unlock(&fw_lock);
315}
316
317static int
318fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
319{
320 loff_t size;
321 int i, len;
322 int rc = -ENOENT;
323 char *path;
324 enum kernel_read_file_id id = READING_FIRMWARE;
325 size_t msize = INT_MAX;
326
	/* Already populated data member means we're loading into a buffer */
328 if (buf->data) {
329 id = READING_FIRMWARE_PREALLOC_BUFFER;
330 msize = buf->allocated_size;
331 }
332
333 path = __getname();
334 if (!path)
335 return -ENOMEM;
336
337 for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
		/* skip the unset customized path */
339 if (!fw_path[i][0])
340 continue;
341
342 len = snprintf(path, PATH_MAX, "%s/%s",
343 fw_path[i], buf->fw_id);
344 if (len >= PATH_MAX) {
345 rc = -ENAMETOOLONG;
346 break;
347 }
348
349 buf->size = 0;
350 rc = kernel_read_file_from_path(path, &buf->data, &size, msize,
351 id);
352 if (rc) {
353 if (rc == -ENOENT)
354 dev_dbg(device, "loading %s failed with error %d\n",
355 path, rc);
356 else
357 dev_warn(device, "loading %s failed with error %d\n",
358 path, rc);
359 continue;
360 }
361 dev_dbg(device, "direct-loading %s\n", buf->fw_id);
362 buf->size = size;
363 fw_finish_direct_load(device, buf);
364 break;
365 }
366 __putname(path);
367
368 return rc;
369}
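
/*
 * Example: for request_firmware(&fw, "foo/bar.bin", dev) the loop above
 * probes, in order (the custom 'firmware_class.path=' prefix first, when
 * set):
 *
 *	$CUSTOM_PATH/foo/bar.bin
 *	/lib/firmware/updates/<UTS_RELEASE>/foo/bar.bin
 *	/lib/firmware/updates/foo/bar.bin
 *	/lib/firmware/<UTS_RELEASE>/foo/bar.bin
 *	/lib/firmware/foo/bar.bin
 *
 * The first file that kernel_read_file_from_path() can read wins; the
 * firmware name "foo/bar.bin" is only an example.
 */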
370
/* firmware holds the ownership of pages */
static void firmware_free_data(const struct firmware *fw)
{
	/* Loaded directly? */
	if (!fw->priv) {
		vfree(fw->data);
		return;
	}
	fw_free_buf(fw->priv);
}
381
382
383static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
384{
385 fw->priv = buf;
386#ifdef CONFIG_FW_LOADER_USER_HELPER
387 fw->pages = buf->pages;
388#endif
389 fw->size = buf->size;
390 fw->data = buf->data;
391
392 pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
393 __func__, buf->fw_id, buf, buf->data,
394 (unsigned int)buf->size);
395}
396
397#ifdef CONFIG_PM_SLEEP
398static void fw_name_devm_release(struct device *dev, void *res)
399{
400 struct fw_name_devm *fwn = res;
401
402 if (fwn->magic == (unsigned long)&fw_cache)
403 pr_debug("%s: fw_name-%s devm-%p released\n",
404 __func__, fwn->name, res);
405 kfree_const(fwn->name);
406}
407
408static int fw_devm_match(struct device *dev, void *res,
409 void *match_data)
410{
411 struct fw_name_devm *fwn = res;
412
413 return (fwn->magic == (unsigned long)&fw_cache) &&
414 !strcmp(fwn->name, match_data);
415}
416
417static struct fw_name_devm *fw_find_devm_name(struct device *dev,
418 const char *name)
419{
420 struct fw_name_devm *fwn;
421
422 fwn = devres_find(dev, fw_name_devm_release,
423 fw_devm_match, (void *)name);
424 return fwn;
425}
426
/* add firmware name into devres list */
428static int fw_add_devm_name(struct device *dev, const char *name)
429{
430 struct fw_name_devm *fwn;
431
432 fwn = fw_find_devm_name(dev, name);
433 if (fwn)
434 return 1;
435
436 fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
437 GFP_KERNEL);
438 if (!fwn)
439 return -ENOMEM;
440 fwn->name = kstrdup_const(name, GFP_KERNEL);
441 if (!fwn->name) {
442 devres_free(fwn);
443 return -ENOMEM;
444 }
445
446 fwn->magic = (unsigned long)&fw_cache;
447 devres_add(dev, fwn);
448
449 return 0;
450}
451#else
452static int fw_add_devm_name(struct device *dev, const char *name)
453{
454 return 0;
455}
456#endif
457
/*
 * user-mode helper code
 */
462#ifdef CONFIG_FW_LOADER_USER_HELPER
463struct firmware_priv {
464 bool nowait;
465 struct device dev;
466 struct firmware_buf *buf;
467 struct firmware *fw;
468};
469
470static struct firmware_priv *to_firmware_priv(struct device *dev)
471{
472 return container_of(dev, struct firmware_priv, dev);
473}
474
static void __fw_load_abort(struct firmware_buf *buf)
{
	/*
	 * There is a small window in which user can write to 'loading'
	 * between loading done and disappearance of 'loading'
	 */
	if (test_bit(FW_STATUS_DONE, &buf->status))
		return;

	list_del_init(&buf->pending_list);
	set_bit(FW_STATUS_ABORT, &buf->status);
	complete_all(&buf->completion);
}
488
489static void fw_load_abort(struct firmware_priv *fw_priv)
490{
491 struct firmware_buf *buf = fw_priv->buf;
492
493 __fw_load_abort(buf);
494
	/* avoid user action after loading abort */
496 fw_priv->buf = NULL;
497}
498
499#define is_fw_load_aborted(buf) \
500 test_bit(FW_STATUS_ABORT, &(buf)->status)
501
502static LIST_HEAD(pending_fw_head);
503
/* reboot notifier: abort all pending user-helper loads so shutdown is not blocked */
505static int fw_shutdown_notify(struct notifier_block *unused1,
506 unsigned long unused2, void *unused3)
507{
508 mutex_lock(&fw_lock);
509 while (!list_empty(&pending_fw_head))
510 __fw_load_abort(list_first_entry(&pending_fw_head,
511 struct firmware_buf,
512 pending_list));
513 mutex_unlock(&fw_lock);
514 return NOTIFY_DONE;
515}
516
517static struct notifier_block fw_shutdown_nb = {
518 .notifier_call = fw_shutdown_notify,
519};
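
/*
 * The reboot notifier above aborts every request still on pending_fw_head,
 * waking up waiters in _request_firmware_load(), so a firmware load that
 * userspace never completed cannot block shutdown.
 */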
520
521static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
522 char *buf)
523{
524 return sprintf(buf, "%d\n", loading_timeout);
525}
526
/**
 * timeout_store - set number of seconds to wait for firmware
 * @class: device class pointer
 * @attr: class attribute pointer
 * @buf: buffer to scan for timeout value
 * @count: number of bytes in @buf
 *
 *	Sets the number of seconds to wait for the firmware.  Once
 *	this expires an error will be returned to the driver and no
 *	firmware will be provided.
 *
 *	Note: zero means 'wait forever'.
 **/
540static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
541 const char *buf, size_t count)
542{
543 loading_timeout = simple_strtol(buf, NULL, 10);
544 if (loading_timeout < 0)
545 loading_timeout = 0;
546
547 return count;
548}
549
550static struct class_attribute firmware_class_attrs[] = {
551 __ATTR_RW(timeout),
552 __ATTR_NULL
553};
554
555static void fw_dev_release(struct device *dev)
556{
557 struct firmware_priv *fw_priv = to_firmware_priv(dev);
558
559 kfree(fw_priv);
560}
561
562static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
563{
564 if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
565 return -ENOMEM;
566 if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
567 return -ENOMEM;
568 if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
569 return -ENOMEM;
570
571 return 0;
572}
573
574static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
575{
576 struct firmware_priv *fw_priv = to_firmware_priv(dev);
577 int err = 0;
578
579 mutex_lock(&fw_lock);
580 if (fw_priv->buf)
581 err = do_firmware_uevent(fw_priv, env);
582 mutex_unlock(&fw_lock);
583 return err;
584}
585
586static struct class firmware_class = {
587 .name = "firmware",
588 .class_attrs = firmware_class_attrs,
589 .dev_uevent = firmware_uevent,
590 .dev_release = fw_dev_release,
591};
592
593static ssize_t firmware_loading_show(struct device *dev,
594 struct device_attribute *attr, char *buf)
595{
596 struct firmware_priv *fw_priv = to_firmware_priv(dev);
597 int loading = 0;
598
599 mutex_lock(&fw_lock);
600 if (fw_priv->buf)
601 loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
602 mutex_unlock(&fw_lock);
603
604 return sprintf(buf, "%d\n", loading);
605}
606
/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
#endif

/* one pages buffer should be mapped/unmapped only once */
static int fw_map_pages_buf(struct firmware_buf *buf)
614{
615 if (!buf->is_paged_buf)
616 return 0;
617
618 vunmap(buf->data);
619 buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
620 if (!buf->data)
621 return -ENOMEM;
622 return 0;
623}
624
/**
 * firmware_loading_store - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 *	The relevant values are:
 *
 *	 1: Start a load, discarding any previous partial load.
 *	 0: Conclude the load and hand the data to the driver code.
 *	-1: Conclude the load with an error and discard any written data.
 **/
638static ssize_t firmware_loading_store(struct device *dev,
639 struct device_attribute *attr,
640 const char *buf, size_t count)
641{
642 struct firmware_priv *fw_priv = to_firmware_priv(dev);
643 struct firmware_buf *fw_buf;
644 ssize_t written = count;
645 int loading = simple_strtol(buf, NULL, 10);
646 int i;
647
648 mutex_lock(&fw_lock);
649 fw_buf = fw_priv->buf;
650 if (!fw_buf)
651 goto out;
652
653 switch (loading) {
654 case 1:
		/* discarding any previous partial load */
656 if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
657 for (i = 0; i < fw_buf->nr_pages; i++)
658 __free_page(fw_buf->pages[i]);
659 vfree(fw_buf->pages);
660 fw_buf->pages = NULL;
661 fw_buf->page_array_size = 0;
662 fw_buf->nr_pages = 0;
663 set_bit(FW_STATUS_LOADING, &fw_buf->status);
664 }
665 break;
666 case 0:
667 if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
668 int rc;
669
670 set_bit(FW_STATUS_DONE, &fw_buf->status);
671 clear_bit(FW_STATUS_LOADING, &fw_buf->status);

			/*
			 * Several loading requests may be pending on
			 * this same firmware buf, so let all requests
			 * see the mapped 'buf->data' once the loading
			 * is completed.
			 */
679 rc = fw_map_pages_buf(fw_buf);
680 if (rc)
681 dev_err(dev, "%s: map pages failed\n",
682 __func__);
683 else
684 rc = security_kernel_post_read_file(NULL,
685 fw_buf->data, fw_buf->size,
686 READING_FIRMWARE);

			/*
			 * Same logic as fw_load_abort(), only the DONE bit
			 * is ignored and we set ABORT only on failure.
			 */
692 list_del_init(&fw_buf->pending_list);
693 if (rc) {
694 set_bit(FW_STATUS_ABORT, &fw_buf->status);
695 written = rc;
696 }
697 complete_all(&fw_buf->completion);
698 break;
699 }
		/* fallthrough */
701 default:
702 dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		/* fallthrough */
704 case -1:
705 fw_load_abort(fw_priv);
706 break;
707 }
708out:
709 mutex_unlock(&fw_lock);
710 return written;
711}
712
713static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
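
/*
 * For reference, the userspace side of this fallback loader (the paths are
 * illustrative; $DEVPATH and $FIRMWARE come from the uevent environment):
 *
 *	echo 1 > /sys/$DEVPATH/loading
 *	cat /lib/firmware/$FIRMWARE > /sys/$DEVPATH/data
 *	echo 0 > /sys/$DEVPATH/loading
 *
 * Writing -1 to 'loading' aborts the request instead.
 */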
714
715static void firmware_rw_buf(struct firmware_buf *buf, char *buffer,
716 loff_t offset, size_t count, bool read)
717{
718 if (read)
719 memcpy(buffer, buf->data + offset, count);
720 else
721 memcpy(buf->data + offset, buffer, count);
722}
723
724static void firmware_rw(struct firmware_buf *buf, char *buffer,
725 loff_t offset, size_t count, bool read)
726{
727 while (count) {
728 void *page_data;
729 int page_nr = offset >> PAGE_SHIFT;
730 int page_ofs = offset & (PAGE_SIZE-1);
731 int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
732
733 page_data = kmap(buf->pages[page_nr]);
734
735 if (read)
736 memcpy(buffer, page_data + page_ofs, page_cnt);
737 else
738 memcpy(page_data + page_ofs, buffer, page_cnt);
739
740 kunmap(buf->pages[page_nr]);
741 buffer += page_cnt;
742 offset += page_cnt;
743 count -= page_cnt;
744 }
745}
746
747static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
748 struct bin_attribute *bin_attr,
749 char *buffer, loff_t offset, size_t count)
750{
751 struct device *dev = kobj_to_dev(kobj);
752 struct firmware_priv *fw_priv = to_firmware_priv(dev);
753 struct firmware_buf *buf;
754 ssize_t ret_count;
755
756 mutex_lock(&fw_lock);
757 buf = fw_priv->buf;
758 if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
759 ret_count = -ENODEV;
760 goto out;
761 }
762 if (offset > buf->size) {
763 ret_count = 0;
764 goto out;
765 }
766 if (count > buf->size - offset)
767 count = buf->size - offset;
768
769 ret_count = count;
770
771 if (buf->data)
772 firmware_rw_buf(buf, buffer, offset, count, true);
773 else
774 firmware_rw(buf, buffer, offset, count, true);
775
776out:
777 mutex_unlock(&fw_lock);
778 return ret_count;
779}
780
781static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
782{
783 struct firmware_buf *buf = fw_priv->buf;
784 int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;
785
	/* If the array of pages is too small, grow it */
787 if (buf->page_array_size < pages_needed) {
788 int new_array_size = max(pages_needed,
789 buf->page_array_size * 2);
790 struct page **new_pages;
791
792 new_pages = vmalloc(new_array_size * sizeof(void *));
793 if (!new_pages) {
794 fw_load_abort(fw_priv);
795 return -ENOMEM;
796 }
797 memcpy(new_pages, buf->pages,
798 buf->page_array_size * sizeof(void *));
799 memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
800 (new_array_size - buf->page_array_size));
801 vfree(buf->pages);
802 buf->pages = new_pages;
803 buf->page_array_size = new_array_size;
804 }
805
806 while (buf->nr_pages < pages_needed) {
807 buf->pages[buf->nr_pages] =
808 alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
809
810 if (!buf->pages[buf->nr_pages]) {
811 fw_load_abort(fw_priv);
812 return -ENOMEM;
813 }
814 buf->nr_pages++;
815 }
816 return 0;
817}
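
/*
 * fw_realloc_buffer() grows the page-pointer array geometrically (at least
 * doubling it) and then allocates highmem pages one by one until the buffer
 * can hold 'min_size' bytes, aborting the whole load on any allocation
 * failure.  The pages are only vmap()ed into a contiguous kernel mapping
 * later, in fw_map_pages_buf().
 */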
818
/**
 * firmware_data_write - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 *	Data written to the 'data' attribute will be later handed to
 *	the driver as a firmware image.
 **/
831static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
832 struct bin_attribute *bin_attr,
833 char *buffer, loff_t offset, size_t count)
834{
835 struct device *dev = kobj_to_dev(kobj);
836 struct firmware_priv *fw_priv = to_firmware_priv(dev);
837 struct firmware_buf *buf;
838 ssize_t retval;
839
840 if (!capable(CAP_SYS_RAWIO))
841 return -EPERM;
842
843 mutex_lock(&fw_lock);
844 buf = fw_priv->buf;
845 if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
846 retval = -ENODEV;
847 goto out;
848 }
849
850 if (buf->data) {
851 if (offset + count > buf->allocated_size) {
852 retval = -ENOMEM;
853 goto out;
854 }
855 firmware_rw_buf(buf, buffer, offset, count, false);
856 retval = count;
857 } else {
858 retval = fw_realloc_buffer(fw_priv, offset + count);
859 if (retval)
860 goto out;
861
862 retval = count;
863 firmware_rw(buf, buffer, offset, count, false);
864 }
865
866 buf->size = max_t(size_t, offset + count, buf->size);
867out:
868 mutex_unlock(&fw_lock);
869 return retval;
870}
871
872static struct bin_attribute firmware_attr_data = {
873 .attr = { .name = "data", .mode = 0644 },
874 .size = 0,
875 .read = firmware_data_read,
876 .write = firmware_data_write,
877};
878
879static struct attribute *fw_dev_attrs[] = {
880 &dev_attr_loading.attr,
881 NULL
882};
883
884static struct bin_attribute *fw_dev_bin_attrs[] = {
885 &firmware_attr_data,
886 NULL
887};
888
889static const struct attribute_group fw_dev_attr_group = {
890 .attrs = fw_dev_attrs,
891 .bin_attrs = fw_dev_bin_attrs,
892};
893
894static const struct attribute_group *fw_dev_attr_groups[] = {
895 &fw_dev_attr_group,
896 NULL
897};
898
899static struct firmware_priv *
900fw_create_instance(struct firmware *firmware, const char *fw_name,
901 struct device *device, unsigned int opt_flags)
902{
903 struct firmware_priv *fw_priv;
904 struct device *f_dev;
905
906 fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
907 if (!fw_priv) {
908 fw_priv = ERR_PTR(-ENOMEM);
909 goto exit;
910 }
911
912 fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
913 fw_priv->fw = firmware;
914 f_dev = &fw_priv->dev;
915
916 device_initialize(f_dev);
917 dev_set_name(f_dev, "%s", fw_name);
918 f_dev->parent = device;
919 f_dev->class = &firmware_class;
920 f_dev->groups = fw_dev_attr_groups;
921exit:
922 return fw_priv;
923}
924
/* load a firmware via user helper */
926static int _request_firmware_load(struct firmware_priv *fw_priv,
927 unsigned int opt_flags, long timeout)
928{
929 int retval = 0;
930 struct device *f_dev = &fw_priv->dev;
931 struct firmware_buf *buf = fw_priv->buf;
932
	/* fall back on userspace loading */
934 if (!buf->data)
935 buf->is_paged_buf = true;
936
937 dev_set_uevent_suppress(f_dev, true);
938
939 retval = device_add(f_dev);
940 if (retval) {
941 dev_err(f_dev, "%s: device_register failed\n", __func__);
942 goto err_put_dev;
943 }
944
945 mutex_lock(&fw_lock);
946 list_add(&buf->pending_list, &pending_fw_head);
947 mutex_unlock(&fw_lock);
948
949 if (opt_flags & FW_OPT_UEVENT) {
950 buf->need_uevent = true;
951 dev_set_uevent_suppress(f_dev, false);
952 dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
953 kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
954 } else {
955 timeout = MAX_JIFFY_OFFSET;
956 }
957
958 retval = wait_for_completion_interruptible_timeout(&buf->completion,
959 timeout);
960 if (retval == -ERESTARTSYS || !retval) {
961 mutex_lock(&fw_lock);
962 fw_load_abort(fw_priv);
963 mutex_unlock(&fw_lock);
964 } else if (retval > 0) {
965 retval = 0;
966 }
967
968 if (is_fw_load_aborted(buf))
969 retval = -EAGAIN;
970 else if (buf->is_paged_buf && !buf->data)
971 retval = -ENOMEM;
972
973 device_del(f_dev);
974err_put_dev:
975 put_device(f_dev);
976 return retval;
977}
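
/*
 * Summary of the user-helper path above: register the transient firmware
 * device (exposing 'loading' and 'data'), queue the buf on pending_fw_head,
 * optionally announce it with a KOBJ_ADD uevent, then sleep on
 * buf->completion until userspace finishes, the timeout expires, or the
 * request is aborted.
 */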
978
979static int fw_load_from_user_helper(struct firmware *firmware,
980 const char *name, struct device *device,
981 unsigned int opt_flags, long timeout)
982{
983 struct firmware_priv *fw_priv;
984
985 fw_priv = fw_create_instance(firmware, name, device, opt_flags);
986 if (IS_ERR(fw_priv))
987 return PTR_ERR(fw_priv);
988
989 fw_priv->buf = firmware->priv;
990 return _request_firmware_load(fw_priv, opt_flags, timeout);
991}
992
993#ifdef CONFIG_PM_SLEEP
/* kill pending requests without uevent to avoid blocking suspend */
995static void kill_requests_without_uevent(void)
996{
997 struct firmware_buf *buf;
998 struct firmware_buf *next;
999
1000 mutex_lock(&fw_lock);
1001 list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) {
1002 if (!buf->need_uevent)
1003 __fw_load_abort(buf);
1004 }
1005 mutex_unlock(&fw_lock);
1006}
1007#endif
1008
1009#else
1010static inline int
1011fw_load_from_user_helper(struct firmware *firmware, const char *name,
1012 struct device *device, unsigned int opt_flags,
1013 long timeout)
1014{
1015 return -ENOENT;
1016}
1017
1018
1019#define is_fw_load_aborted(buf) false
1020
1021#ifdef CONFIG_PM_SLEEP
1022static inline void kill_requests_without_uevent(void) { }
1023#endif
1024
1025#endif
1026
1027
/* wait until the shared firmware_buf becomes ready (or error) */
1029static int sync_cached_firmware_buf(struct firmware_buf *buf)
1030{
1031 int ret = 0;
1032
1033 mutex_lock(&fw_lock);
1034 while (!test_bit(FW_STATUS_DONE, &buf->status)) {
1035 if (is_fw_load_aborted(buf)) {
1036 ret = -ENOENT;
1037 break;
1038 }
1039 mutex_unlock(&fw_lock);
1040 ret = wait_for_completion_interruptible(&buf->completion);
1041 mutex_lock(&fw_lock);
1042 }
1043 mutex_unlock(&fw_lock);
1044 return ret;
1045}
1046
/* prepare firmware and firmware_buf structs;
 * return 0 if a firmware is already assigned, 1 if need to load one,
 * or a negative error code
 */
1051static int
1052_request_firmware_prepare(struct firmware **firmware_p, const char *name,
1053 struct device *device, void *dbuf, size_t size)
1054{
1055 struct firmware *firmware;
1056 struct firmware_buf *buf;
1057 int ret;
1058
1059 *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
1060 if (!firmware) {
1061 dev_err(device, "%s: kmalloc(struct firmware) failed\n",
1062 __func__);
1063 return -ENOMEM;
1064 }
1065
1066 if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
1067 dev_dbg(device, "using built-in %s\n", name);
1068 return 0;
1069 }
1070
1071 ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size);
1072
	/*
	 * bind with 'buf' now to avoid warning in failure path
	 * of requesting firmware.
	 */
1077 firmware->priv = buf;
1078
1079 if (ret > 0) {
1080 ret = sync_cached_firmware_buf(buf);
1081 if (!ret) {
1082 fw_set_page_data(buf, firmware);
1083 return 0;
1084 }
1085 }
1086
1087 if (ret < 0)
1088 return ret;
1089 return 1;
1090}
1091
1092static int assign_firmware_buf(struct firmware *fw, struct device *device,
1093 unsigned int opt_flags)
1094{
1095 struct firmware_buf *buf = fw->priv;
1096
1097 mutex_lock(&fw_lock);
1098 if (!buf->size || is_fw_load_aborted(buf)) {
1099 mutex_unlock(&fw_lock);
1100 return -ENOENT;
1101 }
1102
	/*
	 * add firmware name into devres list so that we can auto cache
	 * and uncache firmware for device.
	 *
	 * device may have been deleted already, but the problem
	 * should be fixed in devres or driver core.
	 */
	/* don't cache firmware handled without uevent */
1111 if (device && (opt_flags & FW_OPT_UEVENT) &&
1112 !(opt_flags & FW_OPT_NOCACHE))
1113 fw_add_devm_name(device, buf->fw_id);
1114
	/*
	 * After caching firmware image is started, let it piggyback
	 * on request firmware.
	 */
1119 if (!(opt_flags & FW_OPT_NOCACHE) &&
1120 buf->fwc->state == FW_LOADER_START_CACHE) {
1121 if (fw_cache_piggyback_on_request(buf->fw_id))
1122 kref_get(&buf->ref);
1123 }
1124
	/* pass the pages buffer to driver at the last minute */
1126 fw_set_page_data(buf, fw);
1127 mutex_unlock(&fw_lock);
1128 return 0;
1129}
1130
/* called from request_firmware() and request_firmware_work_func() */
1132static int
1133_request_firmware(const struct firmware **firmware_p, const char *name,
1134 struct device *device, void *buf, size_t size,
1135 unsigned int opt_flags)
1136{
1137 struct firmware *fw = NULL;
1138 long timeout;
1139 int ret;
1140
1141 if (!firmware_p)
1142 return -EINVAL;
1143
1144 if (!name || name[0] == '\0') {
1145 ret = -EINVAL;
1146 goto out;
1147 }
1148
1149 ret = _request_firmware_prepare(&fw, name, device, buf, size);
1150 if (ret <= 0)
1151 goto out;
1152
1153 ret = 0;
1154 timeout = firmware_loading_timeout();
1155 if (opt_flags & FW_OPT_NOWAIT) {
1156 timeout = usermodehelper_read_lock_wait(timeout);
1157 if (!timeout) {
1158 dev_dbg(device, "firmware: %s loading timed out\n",
1159 name);
1160 ret = -EBUSY;
1161 goto out;
1162 }
1163 } else {
1164 ret = usermodehelper_read_trylock();
1165 if (WARN_ON(ret)) {
1166 dev_err(device, "firmware: %s will not be loaded\n",
1167 name);
1168 goto out;
1169 }
1170 }
1171
1172 ret = fw_get_filesystem_firmware(device, fw->priv);
1173 if (ret) {
1174 if (!(opt_flags & FW_OPT_NO_WARN))
1175 dev_warn(device,
1176 "Direct firmware load for %s failed with error %d\n",
1177 name, ret);
1178 if (opt_flags & FW_OPT_USERHELPER) {
1179 dev_warn(device, "Falling back to user helper\n");
1180 ret = fw_load_from_user_helper(fw, name, device,
1181 opt_flags, timeout);
1182 }
1183 }
1184
1185 if (!ret)
1186 ret = assign_firmware_buf(fw, device, opt_flags);
1187
1188 usermodehelper_read_unlock();
1189
1190 out:
1191 if (ret < 0) {
1192 release_firmware(fw);
1193 fw = NULL;
1194 }
1195
1196 *firmware_p = fw;
1197 return ret;
1198}
1199
/**
 * request_firmware: - send firmware request and wait for it
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 *	@firmware_p will be used to return a firmware image by the name
 *	of @name for device @device.
 *
 *	Should be called from user context where sleeping is allowed.
 *
 *	@name will be used as $FIRMWARE in the uevent environment and
 *	should be distinctive enough not to be confused with any other
 *	firmware image for this or any other device.
 *
 *	Caller must hold the reference count of @device.
 *
 *	The function can be called safely inside device's suspend and
 *	resume callback.
 **/
1220int
1221request_firmware(const struct firmware **firmware_p, const char *name,
1222 struct device *device)
1223{
1224 int ret;
1225
	/* Need to pin this module until return */
1227 __module_get(THIS_MODULE);
1228 ret = _request_firmware(firmware_p, name, device, NULL, 0,
1229 FW_OPT_UEVENT | FW_OPT_FALLBACK);
1230 module_put(THIS_MODULE);
1231 return ret;
1232}
1233EXPORT_SYMBOL(request_firmware);
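
/*
 * Typical driver-side usage of the synchronous API (the image name and
 * chip_load() below are made up for illustration):
 *
 *	const struct firmware *fw;
 *	int err;
 *
 *	err = request_firmware(&fw, "vendor/chip.bin", &pdev->dev);
 *	if (err)
 *		return err;
 *	err = chip_load(chip, fw->data, fw->size);
 *	release_firmware(fw);
 *	return err;
 *
 * The caller owns 'fw' and must call release_firmware() when done with it.
 */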
1234
/**
 * request_firmware_direct: - load firmware directly without usermode helper
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * This function works pretty much like request_firmware(), but it doesn't
 * fall back to the usermode helper even if the firmware couldn't be loaded
 * directly from the filesystem.  Hence it's useful for loading optional
 * firmware which isn't always present, without the long udev timeouts.
 **/
1246int request_firmware_direct(const struct firmware **firmware_p,
1247 const char *name, struct device *device)
1248{
1249 int ret;
1250
1251 __module_get(THIS_MODULE);
1252 ret = _request_firmware(firmware_p, name, device, NULL, 0,
1253 FW_OPT_UEVENT | FW_OPT_NO_WARN);
1254 module_put(THIS_MODULE);
1255 return ret;
1256}
1257EXPORT_SYMBOL_GPL(request_firmware_direct);
1258
/**
 * request_firmware_into_buf - load firmware into a previously allocated buffer
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded and DMA region allocated
 * @buf: address of buffer to load firmware into
 * @size: size of buffer
 *
 * This function works pretty much like request_firmware(), but it doesn't
 * allocate a buffer to hold the firmware data.  Instead, the firmware is
 * loaded directly into the buffer pointed to by @buf and the @firmware_p
 * data member is pointed at @buf.
 *
 * This function doesn't cache firmware either.
 */
1274int
1275request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
1276 struct device *device, void *buf, size_t size)
1277{
1278 int ret;
1279
1280 __module_get(THIS_MODULE);
1281 ret = _request_firmware(firmware_p, name, device, buf, size,
1282 FW_OPT_UEVENT | FW_OPT_FALLBACK |
1283 FW_OPT_NOCACHE);
1284 module_put(THIS_MODULE);
1285 return ret;
1286}
1287EXPORT_SYMBOL(request_firmware_into_buf);
1288
/**
 * release_firmware: - release the resource associated with a firmware image
 * @fw: firmware resource to release
 **/
1293void release_firmware(const struct firmware *fw)
1294{
1295 if (fw) {
1296 if (!fw_is_builtin_firmware(fw))
1297 firmware_free_data(fw);
1298 kfree(fw);
1299 }
1300}
1301EXPORT_SYMBOL(release_firmware);
1302
/* Async support */
1304struct firmware_work {
1305 struct work_struct work;
1306 struct module *module;
1307 const char *name;
1308 struct device *device;
1309 void *context;
1310 void (*cont)(const struct firmware *fw, void *context);
1311 unsigned int opt_flags;
1312};
1313
1314static void request_firmware_work_func(struct work_struct *work)
1315{
1316 struct firmware_work *fw_work;
1317 const struct firmware *fw;
1318
1319 fw_work = container_of(work, struct firmware_work, work);
1320
1321 _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0,
1322 fw_work->opt_flags);
1323 fw_work->cont(fw, fw_work->context);
1324 put_device(fw_work->device);
1325
1326 module_put(fw_work->module);
1327 kfree_const(fw_work->name);
1328 kfree(fw_work);
1329}
1330
/**
 * request_firmware_nowait - asynchronous version of request_firmware
 * @module: module requesting the firmware
 * @uevent: sends uevent to copy the firmware image if this flag
 *	is non-zero, else the firmware copy must be done manually.
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 * @gfp: allocation flags
 * @context: will be passed over to @cont, and
 *	@fw may be %NULL if firmware request fails.
 * @cont: function will be called asynchronously when the firmware
 *	request is over.
 *
 *	Caller must hold the reference count of @device.
 *
 *	Asynchronous variant of request_firmware() for user contexts:
 *		- sleep for as small periods as possible since it may
 *		  increase kernel boot time of built-in device drivers
 *		  requesting firmware in their ->probe() methods, if
 *		  @gfp is GFP_KERNEL.
 *
 *		- can't sleep at all if @gfp is GFP_ATOMIC.
 **/
1354int
1355request_firmware_nowait(
1356 struct module *module, bool uevent,
1357 const char *name, struct device *device, gfp_t gfp, void *context,
1358 void (*cont)(const struct firmware *fw, void *context))
1359{
1360 struct firmware_work *fw_work;
1361
1362 fw_work = kzalloc(sizeof(struct firmware_work), gfp);
1363 if (!fw_work)
1364 return -ENOMEM;
1365
1366 fw_work->module = module;
1367 fw_work->name = kstrdup_const(name, gfp);
1368 if (!fw_work->name) {
1369 kfree(fw_work);
1370 return -ENOMEM;
1371 }
1372 fw_work->device = device;
1373 fw_work->context = context;
1374 fw_work->cont = cont;
1375 fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
1376 (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
1377
1378 if (!try_module_get(module)) {
1379 kfree_const(fw_work->name);
1380 kfree(fw_work);
1381 return -EFAULT;
1382 }
1383
1384 get_device(fw_work->device);
1385 INIT_WORK(&fw_work->work, request_firmware_work_func);
1386 schedule_work(&fw_work->work);
1387 return 0;
1388}
1389EXPORT_SYMBOL(request_firmware_nowait);
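
/*
 * Sketch of asynchronous use; the callback, image name and chip_load()
 * below are hypothetical:
 *
 *	static void chip_fw_ready(const struct firmware *fw, void *context)
 *	{
 *		struct chip *chip = context;
 *
 *		if (fw) {
 *			chip_load(chip, fw->data, fw->size);
 *			release_firmware(fw);
 *		}
 *	}
 *
 *	err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
 *				      "vendor/chip.bin", &pdev->dev,
 *				      GFP_KERNEL, chip, chip_fw_ready);
 *
 * Note that @fw may be NULL in the callback if the request failed.
 */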
1390
1391#ifdef CONFIG_PM_SLEEP
1392static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
1393
/**
 * cache_firmware - cache one firmware image in kernel memory space
 * @fw_name: the firmware image name
 *
 * Cache firmware in kernel memory so that drivers can use it when
 * the system isn't ready for them to request firmware from userspace.
 * Once it returns successfully, drivers can use request_firmware or its
 * nowait version to get the cached firmware without any interaction
 * with userspace.
 *
 * Return 0 if the firmware image has been cached successfully,
 * non-zero otherwise.
 */
1408static int cache_firmware(const char *fw_name)
1409{
1410 int ret;
1411 const struct firmware *fw;
1412
1413 pr_debug("%s: %s\n", __func__, fw_name);
1414
1415 ret = request_firmware(&fw, fw_name, NULL);
1416 if (!ret)
1417 kfree(fw);
1418
1419 pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);
1420
1421 return ret;
1422}
1423
1424static struct firmware_buf *fw_lookup_buf(const char *fw_name)
1425{
1426 struct firmware_buf *tmp;
1427 struct firmware_cache *fwc = &fw_cache;
1428
1429 spin_lock(&fwc->lock);
1430 tmp = __fw_lookup_buf(fw_name);
1431 spin_unlock(&fwc->lock);
1432
1433 return tmp;
1434}
1435
/**
 * uncache_firmware - remove one cached firmware image
 * @fw_name: the firmware image name
 *
 * Uncache one firmware image which has been cached successfully
 * before.
 *
 * Return 0 if the firmware cache has been removed successfully,
 * non-zero otherwise.
 */
1447static int uncache_firmware(const char *fw_name)
1448{
1449 struct firmware_buf *buf;
1450 struct firmware fw;
1451
1452 pr_debug("%s: %s\n", __func__, fw_name);
1453
1454 if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
1455 return 0;
1456
1457 buf = fw_lookup_buf(fw_name);
1458 if (buf) {
1459 fw_free_buf(buf);
1460 return 0;
1461 }
1462
1463 return -EINVAL;
1464}
1465
1466static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
1467{
1468 struct fw_cache_entry *fce;
1469
1470 fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
1471 if (!fce)
1472 goto exit;
1473
1474 fce->name = kstrdup_const(name, GFP_ATOMIC);
1475 if (!fce->name) {
1476 kfree(fce);
1477 fce = NULL;
1478 goto exit;
1479 }
1480exit:
1481 return fce;
1482}
1483
1484static int __fw_entry_found(const char *name)
1485{
1486 struct firmware_cache *fwc = &fw_cache;
1487 struct fw_cache_entry *fce;
1488
1489 list_for_each_entry(fce, &fwc->fw_names, list) {
1490 if (!strcmp(fce->name, name))
1491 return 1;
1492 }
1493 return 0;
1494}
1495
1496static int fw_cache_piggyback_on_request(const char *name)
1497{
1498 struct firmware_cache *fwc = &fw_cache;
1499 struct fw_cache_entry *fce;
1500 int ret = 0;
1501
1502 spin_lock(&fwc->name_lock);
1503 if (__fw_entry_found(name))
1504 goto found;
1505
1506 fce = alloc_fw_cache_entry(name);
1507 if (fce) {
1508 ret = 1;
1509 list_add(&fce->list, &fwc->fw_names);
1510 pr_debug("%s: fw: %s\n", __func__, name);
1511 }
1512found:
1513 spin_unlock(&fwc->name_lock);
1514 return ret;
1515}
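
/*
 * A return value of 1 from fw_cache_piggyback_on_request() means a new
 * cache entry was created for this name; assign_firmware_buf() then takes
 * an extra reference on the buf so the image stays in memory until
 * uncache_firmware() runs after resume.
 */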
1516
1517static void free_fw_cache_entry(struct fw_cache_entry *fce)
1518{
1519 kfree_const(fce->name);
1520 kfree(fce);
1521}
1522
1523static void __async_dev_cache_fw_image(void *fw_entry,
1524 async_cookie_t cookie)
1525{
1526 struct fw_cache_entry *fce = fw_entry;
1527 struct firmware_cache *fwc = &fw_cache;
1528 int ret;
1529
1530 ret = cache_firmware(fce->name);
1531 if (ret) {
1532 spin_lock(&fwc->name_lock);
1533 list_del(&fce->list);
1534 spin_unlock(&fwc->name_lock);
1535
1536 free_fw_cache_entry(fce);
1537 }
1538}
1539
/* called with dev->devres_lock held */
1541static void dev_create_fw_entry(struct device *dev, void *res,
1542 void *data)
1543{
1544 struct fw_name_devm *fwn = res;
1545 const char *fw_name = fwn->name;
1546 struct list_head *head = data;
1547 struct fw_cache_entry *fce;
1548
1549 fce = alloc_fw_cache_entry(fw_name);
1550 if (fce)
1551 list_add(&fce->list, head);
1552}
1553
1554static int devm_name_match(struct device *dev, void *res,
1555 void *match_data)
1556{
1557 struct fw_name_devm *fwn = res;
1558 return (fwn->magic == (unsigned long)match_data);
1559}
1560
1561static void dev_cache_fw_image(struct device *dev, void *data)
1562{
1563 LIST_HEAD(todo);
1564 struct fw_cache_entry *fce;
1565 struct fw_cache_entry *fce_next;
1566 struct firmware_cache *fwc = &fw_cache;
1567
1568 devres_for_each_res(dev, fw_name_devm_release,
1569 devm_name_match, &fw_cache,
1570 dev_create_fw_entry, &todo);
1571
1572 list_for_each_entry_safe(fce, fce_next, &todo, list) {
1573 list_del(&fce->list);
1574
1575 spin_lock(&fwc->name_lock);
1576
1577 if (!__fw_entry_found(fce->name)) {
1578 list_add(&fce->list, &fwc->fw_names);
1579 } else {
1580 free_fw_cache_entry(fce);
1581 fce = NULL;
1582 }
1583 spin_unlock(&fwc->name_lock);
1584
1585 if (fce)
1586 async_schedule_domain(__async_dev_cache_fw_image,
1587 (void *)fce,
1588 &fw_cache_domain);
1589 }
1590}
1591
1592static void __device_uncache_fw_images(void)
1593{
1594 struct firmware_cache *fwc = &fw_cache;
1595 struct fw_cache_entry *fce;
1596
1597 spin_lock(&fwc->name_lock);
1598 while (!list_empty(&fwc->fw_names)) {
1599 fce = list_entry(fwc->fw_names.next,
1600 struct fw_cache_entry, list);
1601 list_del(&fce->list);
1602 spin_unlock(&fwc->name_lock);
1603
1604 uncache_firmware(fce->name);
1605 free_fw_cache_entry(fce);
1606
1607 spin_lock(&fwc->name_lock);
1608 }
1609 spin_unlock(&fwc->name_lock);
1610}
1611
/**
 * device_cache_fw_images - cache devices' firmware
 *
 * If a device called request_firmware or its nowait version
 * successfully before, the firmware names are recorded in the
 * device's devres list, so device_cache_fw_images can call
 * cache_firmware() to cache those images for the device.  The
 * device driver can then load its firmware easily the next time
 * the system resumes from hibernation/suspend.
 */
1622static void device_cache_fw_images(void)
1623{
1624 struct firmware_cache *fwc = &fw_cache;
1625 int old_timeout;
1626 DEFINE_WAIT(wait);
1627
1628 pr_debug("%s\n", __func__);
1629
	/* cancel uncache work */
1631 cancel_delayed_work_sync(&fwc->work);
1632
	/*
	 * use a small loading timeout for caching devices' firmware
	 * because all these firmware images have been loaded
	 * successfully at least once, and the system is ready for
	 * completing firmware requests.
	 */
1641 old_timeout = loading_timeout;
1642 loading_timeout = 10;
1643
1644 mutex_lock(&fw_lock);
1645 fwc->state = FW_LOADER_START_CACHE;
1646 dpm_for_each_dev(NULL, dev_cache_fw_image);
1647 mutex_unlock(&fw_lock);
1648
	/* wait for completion of caching firmware for all devices */
1650 async_synchronize_full_domain(&fw_cache_domain);
1651
1652 loading_timeout = old_timeout;
1653}
1654
/**
 * device_uncache_fw_images - uncache devices' firmware
 *
 * Uncache all firmware images which have been cached successfully
 * by device_cache_fw_images earlier.
 */
1661static void device_uncache_fw_images(void)
1662{
1663 pr_debug("%s\n", __func__);
1664 __device_uncache_fw_images();
1665}
1666
1667static void device_uncache_fw_images_work(struct work_struct *work)
1668{
1669 device_uncache_fw_images();
1670}
1671
/**
 * device_uncache_fw_images_delay - uncache devices' firmware after a delay
 * @delay: number of milliseconds to wait before uncaching
 *
 * Uncache all devices' firmware which has been cached successfully
 * by device_cache_fw_images earlier, after the given delay.
 */
1679static void device_uncache_fw_images_delay(unsigned long delay)
1680{
1681 queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
1682 msecs_to_jiffies(delay));
1683}
1684
1685static int fw_pm_notify(struct notifier_block *notify_block,
1686 unsigned long mode, void *unused)
1687{
1688 switch (mode) {
1689 case PM_HIBERNATION_PREPARE:
1690 case PM_SUSPEND_PREPARE:
1691 case PM_RESTORE_PREPARE:
1692 kill_requests_without_uevent();
1693 device_cache_fw_images();
1694 break;
1695
1696 case PM_POST_SUSPEND:
1697 case PM_POST_HIBERNATION:
1698 case PM_POST_RESTORE:
		/*
		 * In case that system sleep failed and syscore_suspend is
		 * not called.
		 */
1703 mutex_lock(&fw_lock);
1704 fw_cache.state = FW_LOADER_NO_CACHE;
1705 mutex_unlock(&fw_lock);
1706
1707 device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
1708 break;
1709 }
1710
1711 return 0;
1712}
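
/*
 * Around system sleep the cache works as follows: on the PM_*_PREPARE
 * events all pending user-helper requests without a uevent are aborted and
 * every device's previously used images are loaded back into memory; after
 * resume the cache state is reset and device_uncache_fw_images_delay()
 * drops the cached images again a few seconds later.
 */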
1713
/* stop caching firmware once syscore_suspend is reached */
1715static int fw_suspend(void)
1716{
1717 fw_cache.state = FW_LOADER_NO_CACHE;
1718 return 0;
1719}
1720
1721static struct syscore_ops fw_syscore_ops = {
1722 .suspend = fw_suspend,
1723};
1724#else
1725static int fw_cache_piggyback_on_request(const char *name)
1726{
1727 return 0;
1728}
1729#endif
1730
1731static void __init fw_cache_init(void)
1732{
1733 spin_lock_init(&fw_cache.lock);
1734 INIT_LIST_HEAD(&fw_cache.head);
1735 fw_cache.state = FW_LOADER_NO_CACHE;
1736
1737#ifdef CONFIG_PM_SLEEP
1738 spin_lock_init(&fw_cache.name_lock);
1739 INIT_LIST_HEAD(&fw_cache.fw_names);
1740
1741 INIT_DELAYED_WORK(&fw_cache.work,
1742 device_uncache_fw_images_work);
1743
1744 fw_cache.pm_notify.notifier_call = fw_pm_notify;
1745 register_pm_notifier(&fw_cache.pm_notify);
1746
1747 register_syscore_ops(&fw_syscore_ops);
1748#endif
1749}
1750
1751static int __init firmware_class_init(void)
1752{
1753 fw_cache_init();
1754#ifdef CONFIG_FW_LOADER_USER_HELPER
1755 register_reboot_notifier(&fw_shutdown_nb);
1756 return class_register(&firmware_class);
1757#else
1758 return 0;
1759#endif
1760}
1761
1762static void __exit firmware_class_exit(void)
1763{
1764#ifdef CONFIG_PM_SLEEP
1765 unregister_syscore_ops(&fw_syscore_ops);
1766 unregister_pm_notifier(&fw_cache.pm_notify);
1767#endif
1768#ifdef CONFIG_FW_LOADER_USER_HELPER
1769 unregister_reboot_notifier(&fw_shutdown_nb);
1770 class_unregister(&firmware_class);
1771#endif
1772}
1773
1774fs_initcall(firmware_class_init);
1775module_exit(firmware_class_exit);
1776