1
2
3
4
5
6
7
8
9
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/init.h>
14#include <linux/crash_dump.h>
15#include <linux/export.h>
16#include <linux/bootmem.h>
17#include <linux/pfn.h>
18#include <linux/suspend.h>
19#include <linux/acpi.h>
20#include <linux/firmware-map.h>
21#include <linux/memblock.h>
22#include <linux/sort.h>
23
24#include <asm/e820.h>
25#include <asm/proto.h>
26#include <asm/setup.h>
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
/* The kernel's working e820 memory map (modified as boot proceeds). */
struct e820map e820;
/* Snapshot of the firmware-provided map, taken in setup_memory_map(). */
struct e820map e820_saved;
44
45
/*
 * Start of the window the PCI layer may use for MMIO; set for real in
 * e820_setup_gap().  The odd initial value is a recognizable placeholder.
 */
unsigned long pci_mem_start = 0xaeedbabe;
#ifdef CONFIG_PCI
EXPORT_SYMBOL(pci_mem_start);
#endif
50
51
52
53
54
55int
56e820_any_mapped(u64 start, u64 end, unsigned type)
57{
58 int i;
59
60 for (i = 0; i < e820.nr_map; i++) {
61 struct e820entry *ei = &e820.map[i];
62
63 if (type && ei->type != type)
64 continue;
65 if (ei->addr >= end || ei->addr + ei->size <= start)
66 continue;
67 return 1;
68 }
69 return 0;
70}
71EXPORT_SYMBOL_GPL(e820_any_mapped);
72
73
74
75
76
77
78
79int __init e820_all_mapped(u64 start, u64 end, unsigned type)
80{
81 int i;
82
83 for (i = 0; i < e820.nr_map; i++) {
84 struct e820entry *ei = &e820.map[i];
85
86 if (type && ei->type != type)
87 continue;
88
89 if (ei->addr >= end || ei->addr + ei->size <= start)
90 continue;
91
92
93
94
95 if (ei->addr <= start)
96 start = ei->addr + ei->size;
97
98
99
100
101 if (start >= end)
102 return 1;
103 }
104 return 0;
105}
106
107
108
109
110static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size,
111 int type)
112{
113 int x = e820x->nr_map;
114
115 if (x >= ARRAY_SIZE(e820x->map)) {
116 printk(KERN_ERR "e820: too many entries; ignoring [mem %#010llx-%#010llx]\n",
117 (unsigned long long) start,
118 (unsigned long long) (start + size - 1));
119 return;
120 }
121
122 e820x->map[x].addr = start;
123 e820x->map[x].size = size;
124 e820x->map[x].type = type;
125 e820x->nr_map++;
126}
127
/* Append a region to the kernel's main e820 map. */
void __init e820_add_region(u64 start, u64 size, int type)
{
	__e820_add_region(&e820, start, size, type);
}
132
133static void __init e820_print_type(u32 type)
134{
135 switch (type) {
136 case E820_RAM:
137 case E820_RESERVED_KERN:
138 printk(KERN_CONT "usable");
139 break;
140 case E820_RESERVED:
141 printk(KERN_CONT "reserved");
142 break;
143 case E820_ACPI:
144 printk(KERN_CONT "ACPI data");
145 break;
146 case E820_NVS:
147 printk(KERN_CONT "ACPI NVS");
148 break;
149 case E820_UNUSABLE:
150 printk(KERN_CONT "unusable");
151 break;
152 default:
153 printk(KERN_CONT "type %u", type);
154 break;
155 }
156}
157
158void __init e820_print_map(char *who)
159{
160 int i;
161
162 for (i = 0; i < e820.nr_map; i++) {
163 printk(KERN_INFO "%s: [mem %#018Lx-%#018Lx] ", who,
164 (unsigned long long) e820.map[i].addr,
165 (unsigned long long)
166 (e820.map[i].addr + e820.map[i].size - 1));
167 e820_print_type(e820.map[i].type);
168 printk(KERN_CONT "\n");
169 }
170}
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
/*
 * Helper for sanitize_e820_map(): one "change point".  Every non-empty
 * BIOS entry yields two of these — one at its start address and one at
 * its end address; pbios points back at the owning entry.
 */
struct change_member {
	struct e820entry *pbios;
	unsigned long long addr;
};
237
/*
 * Comparator for sort() over change_member pointers.
 *
 * Inputs are pointers to two elements of change_point[].  If their
 * addresses are unequal, their difference dominates.  If the addresses
 * are equal, then consider one that represents the end of its region
 * (addr != pbios->addr) to be greater than one that does not.
 */
static int __init cpcompare(const void *a, const void *b)
{
	struct change_member * const *app = a, * const *bpp = b;
	const struct change_member *ap = *app, *bp = *bpp;

	if (ap->addr != bp->addr)
		return ap->addr > bp->addr ? 1 : -1;

	return (ap->addr != ap->pbios->addr) - (bp->addr != bp->pbios->addr);
}
254
/*
 * Sanitize the BIOS e820 map in place: sort it by address, remove
 * overlaps (where ranges of different types overlap, the numerically
 * larger — more restrictive — type wins) and coalesce adjacent ranges
 * of the same type.
 *
 * The algorithm turns each entry into two "change points" (start and
 * end), sorts them, and sweeps across them while tracking the set of
 * entries currently overlapping the sweep position.
 *
 * Returns 0 on success with *pnr_map updated to the new entry count,
 * or -1 if the map is unusable (fewer than two entries, or an entry
 * that wraps around the end of the address space).
 */
int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
			     u32 *pnr_map)
{
	static struct change_member change_point_list[2*E820_X_MAX] __initdata;
	static struct change_member *change_point[2*E820_X_MAX] __initdata;
	static struct e820entry *overlap_list[E820_X_MAX] __initdata;
	static struct e820entry new_bios[E820_X_MAX] __initdata;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx;
	int overlap_entries;
	int new_bios_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;
	BUG_ON(old_nr > max_nr_map);

	/* bail out if we find any unreasonable addresses in the bios map */
	for (i = 0; i < old_nr; i++)
		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2 * old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (biosmap[i].size != 0) {
			change_point[chgidx]->addr = biosmap[i].addr;
			change_point[chgidx++]->pbios = &biosmap[i];
			change_point[chgidx]->addr = biosmap[i].addr +
				biosmap[i].size;
			change_point[chgidx++]->pbios = &biosmap[i];
		}
	}
	chg_nr = chgidx;

	/* sort change-point list by memory addresses (low -> high) */
	sort(change_point, chg_nr, sizeof *change_point, cpcompare, NULL);

	/* create a new bios memory map, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_bios_entry = 0;	/* index for creating new bios map entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */

	/* loop through change-points, determining effect on the new map */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping bios entries */
		if (change_point[chgidx]->addr ==
		    change_point[chgidx]->pbios->addr) {
			/*
			 * start point: add the entry to the overlap list
			 * (more than one entry implies an overlap)
			 */
			overlap_list[overlap_entries++] =
				change_point[chgidx]->pbios;
		} else {
			/*
			 * end point: remove the entry from the overlap
			 * list (order-independent, so swap with last)
			 */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] ==
				    change_point[chgidx]->pbios)
					overlap_list[i] =
						overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/*
		 * if there are overlapping entries, decide which "type" to
		 * use: the larger value takes precedence
		 */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/*
		 * continue building the new map based on this information:
		 * each type transition closes the previous output entry
		 * (if any) and may open a new one
		 */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_bios[new_bios_entry].size =
					change_point[chgidx]->addr - last_addr;
				/*
				 * move forward only if the new size
				 * was non-zero
				 */
				if (new_bios[new_bios_entry].size != 0)
					/*
					 * no more space left for new
					 * bios entries?
					 */
					if (++new_bios_entry >= max_nr_map)
						break;
			}
			if (current_type != 0) {
				new_bios[new_bios_entry].addr =
					change_point[chgidx]->addr;
				new_bios[new_bios_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}

	/* retain count for new bios entries */
	new_nr = new_bios_entry;

	/* copy the new bios mapping into the original location */
	memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry));
	*pnr_map = new_nr;

	return 0;
}
380
381static int __init __append_e820_map(struct e820entry *biosmap, int nr_map)
382{
383 while (nr_map) {
384 u64 start = biosmap->addr;
385 u64 size = biosmap->size;
386 u64 end = start + size;
387 u32 type = biosmap->type;
388
389
390 if (start > end)
391 return -1;
392
393 e820_add_region(start, size, type);
394
395 biosmap++;
396 nr_map--;
397 }
398 return 0;
399}
400
401
402
403
404
405
406
407
408
409
/*
 * Copy the BIOS e820 map into the kernel's table after a basic sanity
 * check.  A failure return lets the caller fall back to the legacy
 * BIOS 88/e801 memory-size interfaces.
 */
static int __init append_e820_map(struct e820entry *biosmap, int nr_map)
{
	/* Only one memory region (or negative)? Ignore it */
	if (nr_map < 2)
		return -1;

	return __append_e820_map(biosmap, nr_map);
}
418
/*
 * Change the type of memory in [start, start + size) from old_type to
 * new_type in the given map.  Entries only partially inside the range
 * are split (new pieces are appended via __e820_add_region()).
 * Returns the number of bytes whose type was actually changed.
 */
static u64 __init __e820_update_range(struct e820map *e820x, u64 start,
				      u64 size, unsigned old_type,
				      unsigned new_type)
{
	u64 end;
	unsigned int i;
	u64 real_updated_size = 0;

	BUG_ON(old_type == new_type);

	/* clamp size so that start + size cannot wrap */
	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	end = start + size;
	printk(KERN_DEBUG "e820: update [mem %#010Lx-%#010Lx] ",
	       (unsigned long long) start, (unsigned long long) (end - 1));
	e820_print_type(old_type);
	printk(KERN_CONT " ==> ");
	e820_print_type(new_type);
	printk(KERN_CONT "\n");

	for (i = 0; i < e820x->nr_map; i++) {
		struct e820entry *ei = &e820x->map[i];
		u64 final_start, final_end;
		u64 ei_end;

		if (ei->type != old_type)
			continue;

		ei_end = ei->addr + ei->size;

		/* entry totally covered by the new range? just retype it */
		if (ei->addr >= start && ei_end <= end) {
			ei->type = new_type;
			real_updated_size += ei->size;
			continue;
		}

		/* new range totally inside the entry? split in three */
		if (ei->addr < start && ei_end > end) {
			__e820_add_region(e820x, start, size, new_type);
			__e820_add_region(e820x, end, ei_end - end, ei->type);
			ei->size = start - ei->addr;
			real_updated_size += size;
			continue;
		}

		/* partially covered: retype only the intersection */
		final_start = max(start, ei->addr);
		final_end = min(end, ei_end);
		if (final_start >= final_end)
			continue;

		__e820_add_region(e820x, final_start, final_end - final_start,
				  new_type);

		real_updated_size += final_end - final_start;

		/*
		 * left range could be head or tail, so need to update
		 * size at first.
		 */
		ei->size -= final_end - final_start;
		if (ei->addr < final_start)
			continue;
		ei->addr = final_end;
	}
	return real_updated_size;
}
487
/* Retype a range in the kernel's main map; see __e820_update_range(). */
u64 __init e820_update_range(u64 start, u64 size, unsigned old_type,
			     unsigned new_type)
{
	return __e820_update_range(&e820, start, size, old_type, new_type);
}
493
/* Retype a range in the saved firmware map; see __e820_update_range(). */
static u64 __init e820_update_range_saved(u64 start, u64 size,
					  unsigned old_type, unsigned new_type)
{
	return __e820_update_range(&e820_saved, start, size, old_type,
				   new_type);
}
500
501
/*
 * Remove [start, start + size) from the kernel's main e820 map.  When
 * checktype is set, only entries of old_type are affected; otherwise
 * any entry in the range goes.  Entries may be zeroed, truncated or
 * split.  Returns the number of bytes actually removed.
 */
u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
			     int checktype)
{
	int i;
	u64 end;
	u64 real_removed_size = 0;

	/* clamp size so that start + size cannot wrap */
	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	end = start + size;
	printk(KERN_DEBUG "e820: remove [mem %#010Lx-%#010Lx] ",
	       (unsigned long long) start, (unsigned long long) (end - 1));
	if (checktype)
		e820_print_type(old_type);
	printk(KERN_CONT "\n");

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 final_start, final_end;
		u64 ei_end;

		if (checktype && ei->type != old_type)
			continue;

		ei_end = ei->addr + ei->size;

		/* entry totally inside the removed range? wipe it */
		if (ei->addr >= start && ei_end <= end) {
			real_removed_size += ei->size;
			memset(ei, 0, sizeof(struct e820entry));
			continue;
		}

		/* removed range totally inside the entry? split in two */
		if (ei->addr < start && ei_end > end) {
			e820_add_region(end, ei_end - end, ei->type);
			ei->size = start - ei->addr;
			real_removed_size += size;
			continue;
		}

		/* partially covered: trim only the intersection */
		final_start = max(start, ei->addr);
		final_end = min(end, ei_end);
		if (final_start >= final_end)
			continue;
		real_removed_size += final_end - final_start;

		/*
		 * left range could be head or tail, so need to update
		 * size at first.
		 */
		ei->size -= final_end - final_start;
		if (ei->addr < final_start)
			continue;
		ei->addr = final_end;
	}
	return real_removed_size;
}
561
562void __init update_e820(void)
563{
564 u32 nr_map;
565
566 nr_map = e820.nr_map;
567 if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
568 return;
569 e820.nr_map = nr_map;
570 printk(KERN_INFO "e820: modified physical RAM map:\n");
571 e820_print_map("modified");
572}
/* Re-sanitize the saved firmware map after in-place modifications. */
static void __init update_e820_saved(void)
{
	u32 nr_map;

	nr_map = e820_saved.nr_map;
	if (sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map), &nr_map))
		return;
	e820_saved.nr_map = nr_map;
}
582#define MAX_GAP_END 0x100000000ull
583
584
585
/*
 * Search for a gap in the e820 memory space between start_addr and
 * end_addr (both implicitly capped at MAX_GAP_END = 4 GiB).  On entry
 * *gapsize is the minimum acceptable gap.  Walks the map from the top
 * down, tracking the lowest start address seen so far; returns 1 with
 * *gapstart/*gapsize set if a suitable gap was found, 0 otherwise.
 * NOTE(review): the top-down walk appears to assume the map is sorted —
 * confirm callers only use this after sanitize_e820_map().
 */
__init int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
			   unsigned long start_addr, unsigned long long end_addr)
{
	unsigned long long last;
	int i = e820.nr_map;
	int found = 0;

	last = (end_addr && end_addr < MAX_GAP_END) ? end_addr : MAX_GAP_END;

	while (--i >= 0) {
		unsigned long long start = e820.map[i].addr;
		unsigned long long end = start + e820.map[i].size;

		if (end < start_addr)
			continue;

		/*
		 * Since "last" is at most 4GB, we know we'll
		 * fit in 32 bits if this condition is true
		 */
		if (last > end) {
			unsigned long gap = last - end;

			if (gap >= *gapsize) {
				*gapsize = gap;
				*gapstart = end;
				found = 1;
			}
		}
		if (start < last)
			last = start;
	}
	return found;
}
620
621
622
623
624
625
626
/*
 * Search for the biggest gap in the low 32 bits of the e820 memory
 * space and use it to initialize pci_mem_start, which tells the PCI
 * subsystem where 32-bit MMIO resources can be placed.
 */
__init void e820_setup_gap(void)
{
	unsigned long gapstart, gapsize;
	int found;

	/* defaults: gap at 256 MiB, require at least 4 MiB */
	gapstart = 0x10000000;
	gapsize = 0x400000;
	found = e820_search_gap(&gapstart, &gapsize, 0, MAX_GAP_END);

#ifdef CONFIG_X86_64
	if (!found) {
		/* no gap below 4 GiB: fall back to just above end of RAM */
		gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024;
		printk(KERN_ERR
			"e820: cannot find a gap in the 32bit address range\n"
			"e820: PCI devices with unassigned 32bit BARs may break!\n");
	}
#endif

	/*
	 * e820_reserve_resources_late() protects stolen RAM already,
	 * so the gap can be handed to PCI as-is.
	 */
	pci_mem_start = gapstart;

	printk(KERN_INFO
	       "e820: [mem %#010lx-%#010lx] available for PCI devices\n",
	       gapstart, gapstart + gapsize - 1);
}
654
655
656
657
658
659
660
/*
 * Append extra e820 entries handed over by the boot loader through the
 * setup_data linked list (presumably used when the map does not fit in
 * the fixed-size zero-page array — defined by the x86 boot protocol),
 * then re-sanitize and log the extended map.
 */
void __init parse_e820_ext(struct setup_data *sdata)
{
	int entries;
	struct e820entry *extmap;

	entries = sdata->len / sizeof(struct e820entry);
	extmap = (struct e820entry *)(sdata->data);
	__append_e820_map(extmap, entries);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	printk(KERN_INFO "e820: extended physical RAM map:\n");
	e820_print_map("extended");
}
673
674#if defined(CONFIG_X86_64) || \
675 (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
676
677
678
679
680
681
682
683
/*
 * Find the ranges of physical addresses that do not correspond to
 * e820 RAM areas and mark the corresponding pages as nosave for
 * hibernation.
 *
 * This function requires the e820 map to be sorted and without any
 * overlapping entries and assumes the first e820 area to be RAM.
 */
void __init e820_mark_nosave_regions(unsigned long limit_pfn)
{
	int i;
	unsigned long pfn;

	pfn = PFN_DOWN(e820.map[0].addr + e820.map[0].size);
	for (i = 1; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		/* gap between the previous entry and this one: nosave */
		if (pfn < PFN_UP(ei->addr))
			register_nosave_region(pfn, PFN_UP(ei->addr));

		pfn = PFN_DOWN(ei->addr + ei->size);
		/* non-RAM entries themselves are nosave too */
		if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN)
			register_nosave_region(PFN_UP(ei->addr), pfn);

		if (pfn >= limit_pfn)
			break;
	}
}
704#endif
705
706#ifdef CONFIG_ACPI
707
708
709
710
711static int __init e820_mark_nvs_memory(void)
712{
713 int i;
714
715 for (i = 0; i < e820.nr_map; i++) {
716 struct e820entry *ei = &e820.map[i];
717
718 if (ei->type == E820_NVS)
719 acpi_nvs_register(ei->addr, ei->size);
720 }
721
722 return 0;
723}
724core_initcall(e820_mark_nvs_memory);
725#endif
726
727
728
729
/*
 * Allocate "size" bytes (with the given alignment) from memblock early
 * in boot and mark the range as reserved in the saved e820 map as well.
 * Returns the physical address of the allocation, or 0 on failure.
 */
u64 __init early_reserve_e820(u64 size, u64 align)
{
	u64 addr;

	addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr) {
		e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED);
		printk(KERN_INFO "e820: update e820_saved for early_reserve_e820\n");
		update_e820_saved();
	}

	return addr;
}
743
744#ifdef CONFIG_X86_32
745# ifdef CONFIG_X86_PAE
746# define MAX_ARCH_PFN (1ULL<<(36-PAGE_SHIFT))
747# else
748# define MAX_ARCH_PFN (1ULL<<(32-PAGE_SHIFT))
749# endif
750#else
751# define MAX_ARCH_PFN MAXMEM>>PAGE_SHIFT
752#endif
753
754
755
756
/*
 * Find the highest page frame number covered by e820 entries of the
 * given type, clamped to limit_pfn and to MAX_ARCH_PFN.
 * NOTE(review): the early break when an entry crosses limit_pfn assumes
 * the map is sorted by address (as produced by sanitize_e820_map()).
 */
static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
{
	int i;
	unsigned long last_pfn = 0;
	unsigned long max_arch_pfn = MAX_ARCH_PFN;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		unsigned long start_pfn;
		unsigned long end_pfn;

		if (ei->type != type)
			continue;

		start_pfn = ei->addr >> PAGE_SHIFT;
		end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT;

		if (start_pfn >= limit_pfn)
			continue;
		if (end_pfn > limit_pfn) {
			last_pfn = limit_pfn;
			break;
		}
		if (end_pfn > last_pfn)
			last_pfn = end_pfn;
	}

	if (last_pfn > max_arch_pfn)
		last_pfn = max_arch_pfn;

	printk(KERN_INFO "e820: last_pfn = %#lx max_arch_pfn = %#lx\n",
	       last_pfn, max_arch_pfn);
	return last_pfn;
}
/* Highest RAM pfn usable on this architecture. */
unsigned long __init e820_end_of_ram_pfn(void)
{
	return e820_end_pfn(MAX_ARCH_PFN, E820_RAM);
}
795
/* Highest RAM pfn below 4 GiB. */
unsigned long __init e820_end_of_low_ram_pfn(void)
{
	return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
}
800
/* Print the message on the early console, then panic with it. */
static void early_panic(char *msg)
{
	early_printk(msg);
	panic(msg);
}
806
807static int userdef __initdata;
808
809
/*
 * Handle the "mem=" boot option:
 *   mem=nopentium -- x86-32 only: clear the PSE (4 MiB pages) feature
 *   mem=nn[KMG]   -- limit usable RAM by removing all E820_RAM above nn
 */
static int __init parse_memopt(char *p)
{
	u64 mem_size;

	if (!p)
		return -EINVAL;

	if (!strcmp(p, "nopentium")) {
#ifdef CONFIG_X86_32
		setup_clear_cpu_cap(X86_FEATURE_PSE);
		return 0;
#else
		printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n");
		return -EINVAL;
#endif
	}

	userdef = 1;
	mem_size = memparse(p, &p);

	/* don't remove all of memory when handling "mem={invalid}" */
	if (mem_size == 0)
		return -EINVAL;
	e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);

	return 0;
}
early_param("mem", parse_memopt);
837
/*
 * Parse a single "memmap=" clause (parse_memmap_opt() splits at commas):
 *   memmap=exactmap        -- discard the firmware-provided map entirely
 *   memmap=nn[KMG]@ss[KMG] -- force region at ss to be usable RAM
 *   memmap=nn[KMG]#ss[KMG] -- mark region at ss as ACPI data
 *   memmap=nn[KMG]$ss[KMG] -- mark region at ss as reserved
 *   memmap=nn[KMG]         -- limit RAM, same as "mem=nn[KMG]"
 */
static int __init parse_memmap_one(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!p)
		return -EINVAL;

	if (!strncmp(p, "exactmap", 8)) {
#ifdef CONFIG_CRASH_DUMP
		/*
		 * If we are doing a crash dump, we still need to know
		 * the real memory size before the original memory map
		 * is reset.
		 */
		saved_max_pfn = e820_end_of_ram_pfn();
#endif
		e820.nr_map = 0;
		userdef = 1;
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	/* memparse() consumed nothing: malformed size */
	if (p == oldp)
		return -EINVAL;

	userdef = 1;
	if (*p == '@') {
		start_at = memparse(p+1, &p);
		e820_add_region(start_at, mem_size, E820_RAM);
	} else if (*p == '#') {
		start_at = memparse(p+1, &p);
		e820_add_region(start_at, mem_size, E820_ACPI);
	} else if (*p == '$') {
		start_at = memparse(p+1, &p);
		e820_add_region(start_at, mem_size, E820_RESERVED);
	} else
		e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);

	return *p == '\0' ? 0 : -EINVAL;
}
880static int __init parse_memmap_opt(char *str)
881{
882 while (str) {
883 char *k = strchr(str, ',');
884
885 if (k)
886 *k++ = 0;
887
888 parse_memmap_one(str);
889 str = k;
890 }
891
892 return 0;
893}
894early_param("memmap", parse_memmap_opt);
895
896void __init finish_e820_parsing(void)
897{
898 if (userdef) {
899 u32 nr = e820.nr_map;
900
901 if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
902 early_panic("Invalid user supplied memory map");
903 e820.nr_map = nr;
904
905 printk(KERN_INFO "e820: user-defined physical RAM map:\n");
906 e820_print_map("user");
907 }
908}
909
910static inline const char *e820_type_to_string(int e820_type)
911{
912 switch (e820_type) {
913 case E820_RESERVED_KERN:
914 case E820_RAM: return "System RAM";
915 case E820_ACPI: return "ACPI Tables";
916 case E820_NVS: return "ACPI Non-volatile Storage";
917 case E820_UNUSABLE: return "Unusable memory";
918 default: return "reserved";
919 }
920}
921
922
923
924
/* Resource array for all e820 entries, shared with the _late() pass. */
static struct resource __initdata *e820_res;
/*
 * Allocate a struct resource for every e820 entry and immediately
 * insert those that should be claimed as busy into the iomem tree.
 * The rest are inserted later by e820_reserve_resources_late().  Also
 * exports the saved firmware map via the firmware-map interface.
 */
void __init e820_reserve_resources(void)
{
	int i;
	struct resource *res;
	u64 end;

	res = alloc_bootmem(sizeof(struct resource) * e820.nr_map);
	e820_res = res;
	for (i = 0; i < e820.nr_map; i++) {
		end = e820.map[i].addr + e820.map[i].size - 1;
		/* skip entries whose end does not fit in resource_size_t */
		if (end != (resource_size_t)end) {
			res++;
			continue;
		}
		res->name = e820_type_to_string(e820.map[i].type);
		res->start = e820.map[i].addr;
		res->end = end;

		res->flags = IORESOURCE_MEM;

		/*
		 * don't register the region that could be conflicted with
		 * pci device BAR resource and insert them later in
		 * pcibios_resource_survey()
		 */
		if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20)) {
			res->flags |= IORESOURCE_BUSY;
			insert_resource(&iomem_resource, res);
		}
		res++;
	}

	for (i = 0; i < e820_saved.nr_map; i++) {
		struct e820entry *entry = &e820_saved.map[i];
		firmware_map_add_early(entry->addr,
				       entry->addr + entry->size,
				       e820_type_to_string(entry->type));
	}
}
965
966
967static unsigned long ram_alignment(resource_size_t pos)
968{
969 unsigned long mb = pos >> 20;
970
971
972 if (!mb)
973 return 64*1024;
974
975
976 if (mb < 16)
977 return 1024*1024;
978
979
980 return 64*1024*1024;
981}
982
983#define MAX_RESOURCE_SIZE ((resource_size_t)-1)
984
/*
 * Second pass over the resources built by e820_reserve_resources():
 * insert the deferred (non-busy) ones now that PCI BARs are known, then
 * reserve small "RAM buffer" regions just past each RAM entry so they
 * cannot be assigned to devices.
 */
void __init e820_reserve_resources_late(void)
{
	int i;
	struct resource *res;

	res = e820_res;
	for (i = 0; i < e820.nr_map; i++) {
		/* only entries not yet claimed (no parent) and non-empty */
		if (!res->parent && res->end)
			insert_resource_expand_to_fit(&iomem_resource, res);
		res++;
	}

	/*
	 * Try to bump up RAM regions to reasonable boundaries to
	 * avoid stolen RAM:
	 */
	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *entry = &e820.map[i];
		u64 start, end;

		if (entry->type != E820_RAM)
			continue;
		start = entry->addr + entry->size;
		end = round_up(start, ram_alignment(start)) - 1;
		if (end > MAX_RESOURCE_SIZE)
			end = MAX_RESOURCE_SIZE;
		if (start >= end)
			continue;
		printk(KERN_DEBUG
		       "e820: reserve RAM buffer [mem %#010llx-%#010llx]\n",
		       start, end);
		reserve_region_with_split(&iomem_resource, start, end,
					  "RAM buffer");
	}
}
1020
/*
 * Default x86 memory-map setup: take the BIOS e820 table from the zero
 * page; if it is unusable, fake a two-entry map from the legacy BIOS
 * 88/e801 memory sizes.  Returns the name of the source used, for the
 * boot log.
 */
char *__init default_machine_specific_memory_setup(void)
{
	char *who = "BIOS-e820";
	u32 new_nr;
	/*
	 * Try to copy the BIOS-supplied E820-map.
	 *
	 * Otherwise fake a memory map; one section from 0k->640k,
	 * the next section from 1mb->appropriate_mem_k
	 */
	new_nr = boot_params.e820_entries;
	sanitize_e820_map(boot_params.e820_map,
			  ARRAY_SIZE(boot_params.e820_map),
			  &new_nr);
	boot_params.e820_entries = new_nr;
	if (append_e820_map(boot_params.e820_map, boot_params.e820_entries)
	    < 0) {
		u64 mem_size;

		/* compare results from other methods and take the greater */
		if (boot_params.alt_mem_k
		    < boot_params.screen_info.ext_mem_k) {
			mem_size = boot_params.screen_info.ext_mem_k;
			who = "BIOS-88";
		} else {
			mem_size = boot_params.alt_mem_k;
			who = "BIOS-e801";
		}

		e820.nr_map = 0;
		e820_add_region(0, LOWMEMSIZE(), E820_RAM);
		e820_add_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
	}

	/* In case someone cares... */
	return who;
}
1058
1059void __init setup_memory_map(void)
1060{
1061 char *who;
1062
1063 who = x86_init.resources.memory_setup();
1064 memcpy(&e820_saved, &e820, sizeof(struct e820map));
1065 printk(KERN_INFO "e820: BIOS-provided physical RAM map:\n");
1066 e820_print_map(who);
1067}
1068
/*
 * Feed all usable (E820_RAM / E820_RESERVED_KERN) entries into the
 * memblock allocator so early boot-time allocations can be served.
 */
void __init memblock_x86_fill(void)
{
	int i;
	u64 end;

	/*
	 * EFI may have more than 128 entries.  We are safe to enable
	 * resizing because memblock_x86_fill() runs rather late for x86.
	 */
	memblock_allow_resize();

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		end = ei->addr + ei->size;
		/* skip entries whose end overflows resource_size_t */
		if (end != (resource_size_t)end)
			continue;

		if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN)
			continue;

		memblock_add(ei->addr, ei->size);
	}

	/* throw away partial pages */
	memblock_trim_memory(PAGE_SIZE);

	memblock_dump_all();
}
1099
/*
 * On x86-64: count how many pages below MAX_DMA_PFN are present but no
 * longer free in memblock (i.e. already used during early boot) and
 * report the difference to the page allocator via set_dma_reserve().
 */
void __init memblock_find_dma_reserve(void)
{
#ifdef CONFIG_X86_64
	u64 nr_pages = 0, nr_free_pages = 0;
	unsigned long start_pfn, end_pfn;
	phys_addr_t start, end;
	int i;
	u64 u;

	/*
	 * need to find out used area below MAX_DMA_PFN
	 * need to use memblock to get free size in [0, MAX_DMA_PFN]
	 * at first, and assume boot_mem will not take below MAX_DMA_PFN
	 */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		start_pfn = min_t(unsigned long, start_pfn, MAX_DMA_PFN);
		end_pfn = min_t(unsigned long, end_pfn, MAX_DMA_PFN);
		nr_pages += end_pfn - start_pfn;
	}

	/* free ranges are rounded inward to whole pages */
	for_each_free_mem_range(u, MAX_NUMNODES, &start, &end, NULL) {
		start_pfn = min_t(unsigned long, PFN_UP(start), MAX_DMA_PFN);
		end_pfn = min_t(unsigned long, PFN_DOWN(end), MAX_DMA_PFN);
		if (start_pfn < end_pfn)
			nr_free_pages += end_pfn - start_pfn;
	}

	set_dma_reserve(nr_pages - nr_free_pages);
#endif
}
1130