#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/ioport.h>

MODULE_AUTHOR("Gabriel L. Somlo <somlo@cmu.edu>");
MODULE_DESCRIPTION("QEMU fw_cfg sysfs support");
MODULE_LICENSE("GPL");

/* selector key values for "well-known" fw_cfg entries */
#define FW_CFG_SIGNATURE 0x00
#define FW_CFG_ID 0x01
#define FW_CFG_FILE_DIR 0x19

/* size in bytes of the "QEMU" signature blob */
#define FW_CFG_SIG_SIZE 4

/* fw_cfg file names are stored in a fixed 56-byte buffer */
#define FW_CFG_MAX_FILE_PATH 56

/* fw_cfg file directory entry type */
struct fw_cfg_file {
	u32 size;
	u16 select;
	u16 reserved;
	char name[FW_CFG_MAX_FILE_PATH];
};

/* fw_cfg device i/o register addresses */
static bool fw_cfg_is_mmio;
static phys_addr_t fw_cfg_p_base;
static resource_size_t fw_cfg_p_size;
static void __iomem *fw_cfg_dev_base;
static void __iomem *fw_cfg_reg_ctrl;
static void __iomem *fw_cfg_reg_data;

/* atomic access to fw_cfg device (potentially slow i/o, so using a mutex) */
static DEFINE_MUTEX(fw_cfg_dev_lock);

/* pick appropriate endianness for selector key */
static inline u16 fw_cfg_sel_endianness(u16 key)
{
	return fw_cfg_is_mmio ? cpu_to_be16(key) : cpu_to_le16(key);
}

/* read chunk of given fw_cfg blob (caller responsible for sanity-check) */
static inline void fw_cfg_read_blob(u16 key,
				    void *buf, loff_t pos, size_t count)
{
	u32 glk = -1U;
	acpi_status status;

	/* If we have ACPI, ensure mutual exclusion against any potential
	 * device access by the firmware, e.g. via AML methods:
	 */
	status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
	if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
		/* should never get here */
		WARN(1, "fw_cfg_read_blob: Failed to lock ACPI!\n");
		memset(buf, 0, count);
		return;
	}

	mutex_lock(&fw_cfg_dev_lock);
	iowrite16(fw_cfg_sel_endianness(key), fw_cfg_reg_ctrl);
	while (pos-- > 0)
		ioread8(fw_cfg_reg_data);
	ioread8_rep(fw_cfg_reg_data, buf, count);
	mutex_unlock(&fw_cfg_dev_lock);

	acpi_release_global_lock(glk);
}
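
/*
 * Illustrative usage sketch (editorial addition, not from the original
 * source): the platform probe below verifies the device by reading its
 * 4-byte signature, i.e. effectively
 *
 *	char sig[FW_CFG_SIG_SIZE];
 *
 *	fw_cfg_read_blob(FW_CFG_SIGNATURE, sig, 0, FW_CFG_SIG_SIZE);
 *
 * which selects key 0x00, skips no leading bytes, and streams four bytes
 * out of the data register; a QEMU-provided device yields the string "QEMU".
 */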

/* clean up fw_cfg device i/o */
static void fw_cfg_io_cleanup(void)
{
	if (fw_cfg_is_mmio) {
		iounmap(fw_cfg_dev_base);
		release_mem_region(fw_cfg_p_base, fw_cfg_p_size);
	} else {
		ioport_unmap(fw_cfg_dev_base);
		release_region(fw_cfg_p_base, fw_cfg_p_size);
	}
}

/* arch-specific ctrl & data register offsets are not available in ACPI, DT */
#if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF))
# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
#  define FW_CFG_CTRL_OFF 0x08
#  define FW_CFG_DATA_OFF 0x00
# elif (defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC32))
#  define FW_CFG_CTRL_OFF 0x00
#  define FW_CFG_DATA_OFF 0x02
# elif (defined(CONFIG_X86) || defined(CONFIG_SPARC64))
#  define FW_CFG_CTRL_OFF 0x00
#  define FW_CFG_DATA_OFF 0x01
# else
#  error "QEMU FW_CFG not available on this architecture!"
# endif
#endif
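
/*
 * Editorial note (hedged): on x86 these defaults put the 16-bit selector
 * register at <base> + 0 and the 8-bit data register at <base> + 1, which
 * matches QEMU's conventional fw_cfg i/o port pair (typically 0x510/0x511);
 * the actual base address always comes from ACPI, DT, or the command line
 * at probe time, so treat those port numbers only as an example.
 */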

/* initialize fw_cfg device i/o from platform data */
static int fw_cfg_do_platform_probe(struct platform_device *pdev)
{
	char sig[FW_CFG_SIG_SIZE];
	struct resource *range, *ctrl, *data;

	/* acquire i/o range details */
	fw_cfg_is_mmio = false;
	range = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!range) {
		fw_cfg_is_mmio = true;
		range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!range)
			return -EINVAL;
	}
	fw_cfg_p_base = range->start;
	fw_cfg_p_size = resource_size(range);

	if (fw_cfg_is_mmio) {
		if (!request_mem_region(fw_cfg_p_base,
					fw_cfg_p_size, "fw_cfg_mem"))
			return -EBUSY;
		fw_cfg_dev_base = ioremap(fw_cfg_p_base, fw_cfg_p_size);
		if (!fw_cfg_dev_base) {
			release_mem_region(fw_cfg_p_base, fw_cfg_p_size);
			return -EFAULT;
		}
	} else {
		if (!request_region(fw_cfg_p_base,
				    fw_cfg_p_size, "fw_cfg_io"))
			return -EBUSY;
		fw_cfg_dev_base = ioport_map(fw_cfg_p_base, fw_cfg_p_size);
		if (!fw_cfg_dev_base) {
			release_region(fw_cfg_p_base, fw_cfg_p_size);
			return -EFAULT;
		}
	}

	/* were custom register offsets provided (e.g. on the command line)? */
	ctrl = platform_get_resource_byname(pdev, IORESOURCE_REG, "ctrl");
	data = platform_get_resource_byname(pdev, IORESOURCE_REG, "data");
	if (ctrl && data) {
		fw_cfg_reg_ctrl = fw_cfg_dev_base + ctrl->start;
		fw_cfg_reg_data = fw_cfg_dev_base + data->start;
	} else {
		/* fall back to the arch-specific default offsets */
		fw_cfg_reg_ctrl = fw_cfg_dev_base + FW_CFG_CTRL_OFF;
		fw_cfg_reg_data = fw_cfg_dev_base + FW_CFG_DATA_OFF;
	}

	/* verify fw_cfg device signature */
	fw_cfg_read_blob(FW_CFG_SIGNATURE, sig, 0, FW_CFG_SIG_SIZE);
	if (memcmp(sig, "QEMU", FW_CFG_SIG_SIZE) != 0) {
		fw_cfg_io_cleanup();
		return -ENODEV;
	}

	return 0;
}

/* fw_cfg revision attribute, in /sys/firmware/qemu_fw_cfg top-level dir */
static u32 fw_cfg_rev;

static ssize_t fw_cfg_showrev(struct kobject *k, struct attribute *a, char *buf)
{
	return sprintf(buf, "%u\n", fw_cfg_rev);
}

static const struct {
	struct attribute attr;
	ssize_t (*show)(struct kobject *k, struct attribute *a, char *buf);
} fw_cfg_rev_attr = {
	.attr = { .name = "rev", .mode = S_IRUSR },
	.show = fw_cfg_showrev,
};

/* one fw_cfg sysfs entry per registered fw_cfg file */
struct fw_cfg_sysfs_entry {
	struct kobject kobj;
	struct fw_cfg_file f;
	struct list_head list;
};

/* get fw_cfg_sysfs_entry from kobject member */
static inline struct fw_cfg_sysfs_entry *to_entry(struct kobject *kobj)
{
	return container_of(kobj, struct fw_cfg_sysfs_entry, kobj);
}

/* fw_cfg_sysfs_attribute type */
struct fw_cfg_sysfs_attribute {
	struct attribute attr;
	ssize_t (*show)(struct fw_cfg_sysfs_entry *entry, char *buf);
};

/* get fw_cfg_sysfs_attribute from attribute member */
static inline struct fw_cfg_sysfs_attribute *to_attr(struct attribute *attr)
{
	return container_of(attr, struct fw_cfg_sysfs_attribute, attr);
}

/* global cache of fw_cfg_sysfs_entry objects */
static LIST_HEAD(fw_cfg_entry_cache);

/* kobjects are removed lazily by the kernel, so mutual exclusion is needed */
static DEFINE_SPINLOCK(fw_cfg_cache_lock);

static inline void fw_cfg_sysfs_cache_enlist(struct fw_cfg_sysfs_entry *entry)
{
	spin_lock(&fw_cfg_cache_lock);
	list_add_tail(&entry->list, &fw_cfg_entry_cache);
	spin_unlock(&fw_cfg_cache_lock);
}

static inline void fw_cfg_sysfs_cache_delist(struct fw_cfg_sysfs_entry *entry)
{
	spin_lock(&fw_cfg_cache_lock);
	list_del(&entry->list);
	spin_unlock(&fw_cfg_cache_lock);
}

static void fw_cfg_sysfs_cache_cleanup(void)
{
	struct fw_cfg_sysfs_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) {
		/* kobject_put() ends up invoking
		 * fw_cfg_sysfs_cache_delist() via each object's
		 * release() method (i.e. its destructor)
		 */
		kobject_put(&entry->kobj);
	}
}

/* per-entry attributes and show methods */

#define FW_CFG_SYSFS_ATTR(_attr) \
struct fw_cfg_sysfs_attribute fw_cfg_sysfs_attr_##_attr = { \
	.attr = { .name = __stringify(_attr), .mode = S_IRUSR }, \
	.show = fw_cfg_sysfs_show_##_attr, \
}

static ssize_t fw_cfg_sysfs_show_size(struct fw_cfg_sysfs_entry *e, char *buf)
{
	return sprintf(buf, "%u\n", e->f.size);
}

static ssize_t fw_cfg_sysfs_show_key(struct fw_cfg_sysfs_entry *e, char *buf)
{
	return sprintf(buf, "%u\n", e->f.select);
}

static ssize_t fw_cfg_sysfs_show_name(struct fw_cfg_sysfs_entry *e, char *buf)
{
	return sprintf(buf, "%s\n", e->f.name);
}

static FW_CFG_SYSFS_ATTR(size);
static FW_CFG_SYSFS_ATTR(key);
static FW_CFG_SYSFS_ATTR(name);

static struct attribute *fw_cfg_sysfs_entry_attrs[] = {
	&fw_cfg_sysfs_attr_size.attr,
	&fw_cfg_sysfs_attr_key.attr,
	&fw_cfg_sysfs_attr_name.attr,
	NULL,
};

/* sysfs_ops: find the fw_cfg entry and attribute, call the show method */
static ssize_t fw_cfg_sysfs_attr_show(struct kobject *kobj, struct attribute *a,
				      char *buf)
{
	struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
	struct fw_cfg_sysfs_attribute *attr = to_attr(a);

	return attr->show(entry, buf);
}

static const struct sysfs_ops fw_cfg_sysfs_attr_ops = {
	.show = fw_cfg_sysfs_attr_show,
};

/* release: destructor, to be called via kobject_put() */
static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
{
	struct fw_cfg_sysfs_entry *entry = to_entry(kobj);

	fw_cfg_sysfs_cache_delist(entry);
	kfree(entry);
}

/* kobj_type: ties together all properties required to register an entry */
static struct kobj_type fw_cfg_sysfs_entry_ktype = {
	.default_attrs = fw_cfg_sysfs_entry_attrs,
	.sysfs_ops = &fw_cfg_sysfs_attr_ops,
	.release = fw_cfg_sysfs_release_entry,
};

/* raw-read method and binary attribute */
static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj,
				     struct bin_attribute *bin_attr,
				     char *buf, loff_t pos, size_t count)
{
	struct fw_cfg_sysfs_entry *entry = to_entry(kobj);

	if (pos > entry->f.size)
		return -EINVAL;

	if (count > entry->f.size - pos)
		count = entry->f.size - pos;

	fw_cfg_read_blob(entry->f.select, buf, pos, count);
	return count;
}

static struct bin_attribute fw_cfg_sysfs_attr_raw = {
	.attr = { .name = "raw", .mode = S_IRUSR },
	.read = fw_cfg_sysfs_read_raw,
};
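
/*
 * Resulting per-file sysfs layout (editorial summary, based on the "by_key"
 * kobject created at probe time and the attributes defined above):
 *
 *	/sys/firmware/qemu_fw_cfg/by_key/<key>/name	file name string
 *	/sys/firmware/qemu_fw_cfg/by_key/<key>/size	blob size in bytes
 *	/sys/firmware/qemu_fw_cfg/by_key/<key>/key	selector key (decimal)
 *	/sys/firmware/qemu_fw_cfg/by_key/<key>/raw	raw blob contents
 */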

/*
 * For each '/'-delimited directory component of 'name', create a kset
 * subdirectory under 'dir' (reusing it if it already exists), and finally
 * create a symlink named after the last component, pointing at 'target'.
 * This is done on a best-effort basis, since 'name' is not guaranteed to
 * be a well-behaved path; on collision or error the by_name link is simply
 * not created (the caller ignores the return value).
 */
static int fw_cfg_build_symlink(struct kset *dir,
				struct kobject *target, const char *name)
{
	int ret = -EINVAL;	/* returned if 'name' yields no usable token */
	struct kset *subdir;
	struct kobject *ko;
	char *name_copy, *p, *tok;

	if (!dir || !target || !name || !*name)
		return -EINVAL;

	/* clone a copy of name for parsing */
	name_copy = p = kstrdup(name, GFP_KERNEL);
	if (!name_copy)
		return -ENOMEM;

	/* create folders for each dirname token, then symlink for basename */
	while ((tok = strsep(&p, "/")) && *tok) {

		/* last (basename) token? if so, add symlink here */
		if (!p || !*p) {
			ret = sysfs_create_link(&dir->kobj, target, tok);
			break;
		}

		/* does the current dir contain an entry named after tok? */
		ko = kset_find_obj(dir, tok);
		if (ko) {
			/* drop the extra reference taken by kset_find_obj();
			 * the object is still pinned by its parent kset
			 */
			kobject_put(ko);

			/* ko MUST be a kset - we're about to use it as one! */
			if (ko->ktype != dir->kobj.ktype) {
				ret = -EINVAL;
				break;
			}

			/* descend into the existing subdirectory */
			dir = to_kset(ko);
		} else {
			/* create a new subdirectory kset */
			subdir = kzalloc(sizeof(struct kset), GFP_KERNEL);
			if (!subdir) {
				ret = -ENOMEM;
				break;
			}
			subdir->kobj.kset = dir;
			subdir->kobj.ktype = dir->kobj.ktype;
			ret = kobject_set_name(&subdir->kobj, "%s", tok);
			if (ret) {
				kfree(subdir);
				break;
			}
			ret = kset_register(subdir);
			if (ret) {
				kfree(subdir);
				break;
			}

			/* descend into the newly created subdirectory */
			dir = subdir;
		}
	}

	/* we're done with the cloned copy of name */
	kfree(name_copy);
	return ret;
}
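
/*
 * Worked example (hypothetical file name, editorial addition): for an
 * entry named "etc/example/blob", the loop above creates kset directories
 * "etc" and then "example" under by_name (reusing them if already
 * present), and finally adds a "blob" symlink in by_name/etc/example/
 * that points at the entry's kobject under by_key/.
 */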

/* recursively unregister a kset and any child ksets it contains */
static void fw_cfg_kset_unregister_recursive(struct kset *kset)
{
	struct kobject *k, *next;

	list_for_each_entry_safe(k, next, &kset->list, entry)
		/* all set members are ksets too, but check just in case... */
		if (k->ktype == kset->kobj.ktype)
			fw_cfg_kset_unregister_recursive(to_kset(k));

	/* symlinks are cleanly and automatically removed with the directory */
	kset_unregister(kset);
}

/* kobjects & kset representing top-level, by_key, and by_name folders */
static struct kobject *fw_cfg_top_ko;
static struct kobject *fw_cfg_sel_ko;
static struct kset *fw_cfg_fname_kset;

/* register a single fw_cfg file as a sysfs entry */
static int fw_cfg_register_file(const struct fw_cfg_file *f)
{
	int err;
	struct fw_cfg_sysfs_entry *entry;

	/* allocate new entry */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	/* set file entry information */
	memcpy(&entry->f, f, sizeof(struct fw_cfg_file));

	/* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
	err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
				   fw_cfg_sel_ko, "%d", entry->f.select);
	if (err)
		goto err_register;

	/* add raw binary content access */
	err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
	if (err)
		goto err_add_raw;

	/* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */
	fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->f.name);

	/* success, add entry to global cache */
	fw_cfg_sysfs_cache_enlist(entry);
	return 0;

err_add_raw:
	kobject_del(&entry->kobj);
err_register:
	kfree(entry);
	return err;
}

/* iterate over all fw_cfg directory entries, registering each one */
static int fw_cfg_register_dir_entries(void)
{
	int ret = 0;
	u32 count, i;
	struct fw_cfg_file *dir;
	size_t dir_size;

	/* the directory blob starts with a big-endian entry count */
	fw_cfg_read_blob(FW_CFG_FILE_DIR, &count, 0, sizeof(count));
	count = be32_to_cpu(count);
	dir_size = count * sizeof(struct fw_cfg_file);

	dir = kmalloc(dir_size, GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	/* the entries themselves immediately follow the count */
	fw_cfg_read_blob(FW_CFG_FILE_DIR, dir, sizeof(count), dir_size);

	for (i = 0; i < count; i++) {
		/* per-entry size and selector are stored big-endian, too */
		dir[i].size = be32_to_cpu(dir[i].size);
		dir[i].select = be16_to_cpu(dir[i].select);
		ret = fw_cfg_register_file(&dir[i]);
		if (ret)
			break;
	}

	kfree(dir);
	return ret;
}
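
/*
 * Layout of the FW_CFG_FILE_DIR blob, as implied by the reads and byte
 * swaps above (editorial summary):
 *
 *	u32 count;                        big-endian number of entries
 *	struct fw_cfg_file files[count];  size/select fields big-endian
 *
 * hence the second fw_cfg_read_blob() call starts at offset sizeof(count)
 * so that only the entry array is copied into 'dir'.
 */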

/* unregister top-level or by_key firmware objects */
static inline void fw_cfg_kobj_cleanup(struct kobject *kobj)
{
	kobject_del(kobj);
	kobject_put(kobj);
}

static int fw_cfg_sysfs_probe(struct platform_device *pdev)
{
	int err;

	/* only one fw_cfg device can exist on a given machine: refuse to
	 * probe a second one rather than clobber the sysfs hierarchy that
	 * was already set up for the first
	 */
	if (fw_cfg_sel_ko)
		return -EBUSY;

	/* create by_key and by_name subdirs of /sys/firmware/qemu_fw_cfg/ */
	err = -ENOMEM;
	fw_cfg_sel_ko = kobject_create_and_add("by_key", fw_cfg_top_ko);
	if (!fw_cfg_sel_ko)
		goto err_sel;
	fw_cfg_fname_kset = kset_create_and_add("by_name", NULL, fw_cfg_top_ko);
	if (!fw_cfg_fname_kset)
		goto err_name;

	/* initialize fw_cfg device i/o from platform data */
	err = fw_cfg_do_platform_probe(pdev);
	if (err)
		goto err_probe;

	/* get revision number, add matching top-level attribute */
	fw_cfg_read_blob(FW_CFG_ID, &fw_cfg_rev, 0, sizeof(fw_cfg_rev));
	fw_cfg_rev = le32_to_cpu(fw_cfg_rev);
	err = sysfs_create_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
	if (err)
		goto err_rev;

	/* process fw_cfg file directory entry, registering each file */
	err = fw_cfg_register_dir_entries();
	if (err)
		goto err_dir;

	/* success */
	pr_debug("fw_cfg: loaded.\n");
	return 0;

err_dir:
	fw_cfg_sysfs_cache_cleanup();
	sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
err_rev:
	fw_cfg_io_cleanup();
err_probe:
	fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
err_name:
	fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
err_sel:
	return err;
}

static int fw_cfg_sysfs_remove(struct platform_device *pdev)
{
	pr_debug("fw_cfg: unloading.\n");
	fw_cfg_sysfs_cache_cleanup();
	fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
	fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
	fw_cfg_io_cleanup();
	return 0;
}

static const struct of_device_id fw_cfg_sysfs_mmio_match[] = {
	{ .compatible = "qemu,fw-cfg-mmio", },
	{},
};
MODULE_DEVICE_TABLE(of, fw_cfg_sysfs_mmio_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id fw_cfg_sysfs_acpi_match[] = {
	{ "QEMU0002", },
	{},
};
MODULE_DEVICE_TABLE(acpi, fw_cfg_sysfs_acpi_match);
#endif

static struct platform_driver fw_cfg_sysfs_driver = {
	.probe = fw_cfg_sysfs_probe,
	.remove = fw_cfg_sysfs_remove,
	.driver = {
		.name = "fw_cfg",
		.of_match_table = fw_cfg_sysfs_mmio_match,
		.acpi_match_table = ACPI_PTR(fw_cfg_sysfs_acpi_match),
	},
};

#ifdef CONFIG_FW_CFG_SYSFS_CMDLINE

static struct platform_device *fw_cfg_cmdline_dev;

/* pick the right printf/scanf length modifier for phys_addr_t and
 * resource_size_t, depending on whether physical addresses are 64-bit
 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define __PHYS_ADDR_PREFIX "ll"
#else
#define __PHYS_ADDR_PREFIX ""
#endif

/* scan format for "@<base>[:<ctrl_off>:<data_off>]" */
#define PH_ADDR_SCAN_FMT "@%" __PHYS_ADDR_PREFIX "i%n" \
			 ":%" __PHYS_ADDR_PREFIX "i" \
			 ":%" __PHYS_ADDR_PREFIX "i%n"

/* print formats: "<size>@<base>" alone, or with ":<ctrl_off>:<data_off>" */
#define PH_ADDR_PR_1_FMT "0x%" __PHYS_ADDR_PREFIX "x@" \
			 "0x%" __PHYS_ADDR_PREFIX "x"

#define PH_ADDR_PR_3_FMT PH_ADDR_PR_1_FMT \
			 ":%" __PHYS_ADDR_PREFIX "u" \
			 ":%" __PHYS_ADDR_PREFIX "u"
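
/*
 * Editorial note on the accepted parameter syntax, derived from the
 * memparse() + sscanf() sequence in fw_cfg_cmdline_set() below:
 *
 *	ioport=<size>@<base>[:<ctrl_off>:<data_off>]
 *	mmio=<size>@<base>[:<ctrl_off>:<data_off>]
 *
 * A purely illustrative example would be "mmio=0x18@0x9020000", i.e. a
 * 0x18-byte MMIO range at 0x9020000 using the arch-default register
 * offsets; the numbers are examples only, not values recommended for any
 * particular machine.
 */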

static int fw_cfg_cmdline_set(const char *arg, const struct kernel_param *kp)
{
	struct resource res[3] = {};
	char *str;
	phys_addr_t base;
	resource_size_t size, ctrl_off, data_off;
	int processed, consumed = 0;

	/* only one fw_cfg device can be configured via the command line;
	 * a second attempt is treated as an error
	 */
	if (fw_cfg_cmdline_dev) {
		/* avoid leaking previously registered device */
		platform_device_unregister(fw_cfg_cmdline_dev);
		return -EINVAL;
	}

	/* consume "<size>" portion of command line argument */
	size = memparse(arg, &str);

	/* get "@<base>[:<ctrl_off>:<data_off>]" chunks */
	processed = sscanf(str, PH_ADDR_SCAN_FMT,
			   &base, &consumed,
			   &ctrl_off, &data_off, &consumed);

	/* sscanf() must process precisely 1 or 3 chunks:
	 * <base> is mandatory, optionally followed by <ctrl_off>
	 * and <data_off>; there must also be no extra characters
	 * after the last chunk, so str[consumed] must be '\0'
	 */
	if (str[consumed] ||
	    (processed != 1 && processed != 3))
		return -EINVAL;

	res[0].start = base;
	res[0].end = base + size - 1;
	res[0].flags = !strcmp(kp->name, "mmio") ? IORESOURCE_MEM :
						   IORESOURCE_IO;

	/* insert register offsets, if provided */
	if (processed > 1) {
		res[1].name = "ctrl";
		res[1].start = ctrl_off;
		res[1].flags = IORESOURCE_REG;
		res[2].name = "data";
		res[2].start = data_off;
		res[2].flags = IORESOURCE_REG;
	}

	/* the number of parsed chunks (1 or 3) conveniently matches the
	 * number of resources we need to hand to the platform device
	 */
	fw_cfg_cmdline_dev = platform_device_register_simple("fw_cfg",
					PLATFORM_DEVID_NONE, res, processed);
	if (IS_ERR(fw_cfg_cmdline_dev))
		return PTR_ERR(fw_cfg_cmdline_dev);

	return 0;
}

static int fw_cfg_cmdline_get(char *buf, const struct kernel_param *kp)
{
	/* stay silent if the device was not configured via the command
	 * line, or if the parameter name (ioport vs. mmio) doesn't match
	 * the flavor of the configured resource
	 */
	if (!fw_cfg_cmdline_dev ||
	    (!strcmp(kp->name, "mmio") ^
	     (fw_cfg_cmdline_dev->resource[0].flags == IORESOURCE_MEM)))
		return 0;

	switch (fw_cfg_cmdline_dev->num_resources) {
	case 1:
		return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_1_FMT,
				resource_size(&fw_cfg_cmdline_dev->resource[0]),
				fw_cfg_cmdline_dev->resource[0].start);
	case 3:
		return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_3_FMT,
				resource_size(&fw_cfg_cmdline_dev->resource[0]),
				fw_cfg_cmdline_dev->resource[0].start,
				fw_cfg_cmdline_dev->resource[1].start,
				fw_cfg_cmdline_dev->resource[2].start);
	}

	/* should never get here */
	WARN(1, "Unexpected number of resources: %d\n",
	     fw_cfg_cmdline_dev->num_resources);
	return 0;
}

static const struct kernel_param_ops fw_cfg_cmdline_param_ops = {
	.set = fw_cfg_cmdline_set,
	.get = fw_cfg_cmdline_get,
};

device_param_cb(ioport, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
device_param_cb(mmio, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
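
/*
 * The two device_param_cb() declarations above register "ioport" and
 * "mmio" as module/kernel parameters sharing the same set/get callbacks;
 * reading a parameter back reports the configured range in the
 * PH_ADDR_PR_* format, or nothing at all if that flavor was never
 * configured on the command line.
 */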

#endif

static int __init fw_cfg_sysfs_init(void)
{
	int ret;

	/* create /sys/firmware/qemu_fw_cfg/ top level directory */
	fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj);
	if (!fw_cfg_top_ko)
		return -ENOMEM;

	ret = platform_driver_register(&fw_cfg_sysfs_driver);
	if (ret)
		fw_cfg_kobj_cleanup(fw_cfg_top_ko);

	return ret;
}

static void __exit fw_cfg_sysfs_exit(void)
{
	platform_driver_unregister(&fw_cfg_sysfs_driver);

#ifdef CONFIG_FW_CFG_SYSFS_CMDLINE
	platform_device_unregister(fw_cfg_cmdline_dev);
#endif

	/* clean up /sys/firmware/qemu_fw_cfg/ */
	fw_cfg_kobj_cleanup(fw_cfg_top_ko);
}

module_init(fw_cfg_sysfs_init);
module_exit(fw_cfg_sysfs_exit);