1
2
3
4
5
6#define pr_fmt(fmt) "efi: " fmt
7
8#include <linux/init.h>
9#include <linux/kernel.h>
10#include <linux/efi.h>
11#include <linux/io.h>
12#include <asm/early_ioremap.h>
13#include <linux/memblock.h>
14#include <linux/slab.h>
15
16static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
17{
18 return memblock_phys_alloc(size, SMP_CACHE_BYTES);
19}
20
21static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
22{
23 unsigned int order = get_order(size);
24 struct page *p = alloc_pages(GFP_KERNEL, order);
25
26 if (!p)
27 return 0;
28
29 return PFN_PHYS(page_to_pfn(p));
30}
31
32
33
34
35
36
37
38
39
40
41
42phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
43{
44 unsigned long size = num_entries * efi.memmap.desc_size;
45
46 if (slab_is_available())
47 return __efi_memmap_alloc_late(size);
48
49 return __efi_memmap_alloc_early(size);
50}
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68static int __init
69__efi_memmap_init(struct efi_memory_map_data *data, bool late)
70{
71 struct efi_memory_map map;
72 phys_addr_t phys_map;
73
74 if (efi_enabled(EFI_PARAVIRT))
75 return 0;
76
77 phys_map = data->phys_map;
78
79 if (late)
80 map.map = memremap(phys_map, data->size, MEMREMAP_WB);
81 else
82 map.map = early_memremap(phys_map, data->size);
83
84 if (!map.map) {
85 pr_err("Could not map the memory map!\n");
86 return -ENOMEM;
87 }
88
89 map.phys_map = data->phys_map;
90 map.nr_map = data->size / data->desc_size;
91 map.map_end = map.map + data->size;
92
93 map.desc_version = data->desc_version;
94 map.desc_size = data->desc_size;
95 map.late = late;
96
97 set_bit(EFI_MEMMAP, &efi.flags);
98
99 efi.memmap = map;
100
101 return 0;
102}
103
104
105
106
107
108
109
110
111int __init efi_memmap_init_early(struct efi_memory_map_data *data)
112{
113
114 WARN_ON(efi.memmap.late);
115
116 return __efi_memmap_init(data, false);
117}
118
119void __init efi_memmap_unmap(void)
120{
121 if (!efi_enabled(EFI_MEMMAP))
122 return;
123
124 if (!efi.memmap.late) {
125 unsigned long size;
126
127 size = efi.memmap.desc_size * efi.memmap.nr_map;
128 early_memunmap(efi.memmap.map, size);
129 } else {
130 memunmap(efi.memmap.map);
131 }
132
133 efi.memmap.map = NULL;
134 clear_bit(EFI_MEMMAP, &efi.flags);
135}
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
161{
162 struct efi_memory_map_data data = {
163 .phys_map = addr,
164 .size = size,
165 };
166
167
168 WARN_ON(efi.memmap.map);
169
170
171 WARN_ON(efi.memmap.late);
172
173
174
175
176
177
178 data.desc_version = efi.memmap.desc_version;
179 data.desc_size = efi.memmap.desc_size;
180
181 return __efi_memmap_init(&data, true);
182}
183
184
185
186
187
188
189
190
191
192
193
194
195int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map)
196{
197 struct efi_memory_map_data data;
198
199 efi_memmap_unmap();
200
201 data.phys_map = addr;
202 data.size = efi.memmap.desc_size * nr_map;
203 data.desc_version = efi.memmap.desc_version;
204 data.desc_size = efi.memmap.desc_size;
205
206 return __efi_memmap_init(&data, efi.memmap.late);
207}
208
209
210
211
212
213
214
215
216
/**
 * efi_memmap_split_count - Count the additional descriptors needed to
 *			    split @md around @range
 * @md: EFI memory descriptor that may need splitting
 * @range: Address range (start/end inclusive) to split around
 *
 * Returns how many EXTRA memory map entries (beyond the one @md already
 * occupies) are required so that @range's boundaries fall on descriptor
 * boundaries: 1 if the descriptor splits in two, 2 if it splits in three,
 * 0 if @range does not cut through the descriptor's interior.
 */
int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
{
	u64 m_start, m_end;
	u64 start, end;
	int count = 0;

	/* inclusive [start, end] span covered by the descriptor */
	start = md->phys_addr;
	end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;

	/* range being carved out */
	m_start = range->start;
	m_end = range->end;

	if (m_start <= start) {
		/* range covers the head of md; split in two if its end
		 * lands strictly inside md */
		if (start < m_end && m_end < end)
			count++;
	}

	if (start < m_start && m_start < end) {
		/* range starts strictly inside md and ends inside it too:
		 * split in three */
		if (m_end < end)
			count += 2;

		/* range starts inside md and runs to/past its end:
		 * split in two */
		if (end <= m_end)
			count++;
	}

	return count;
}
247
248
249
250
251
252
253
254
255
256
/**
 * efi_memmap_insert - Insert a memory region into an EFI memory map
 * @old_memmap: The existing EFI memory map structure
 * @buf: Destination buffer for the new map (must be large enough to hold
 *	 the old entries plus the extra ones efi_memmap_split_count()
 *	 reports for each overlapping descriptor)
 * @mem: Memory range and attribute to insert
 *
 * Copies @old_memmap into @buf, splitting any descriptor that partially
 * overlaps @mem->range so that the overlapping portion becomes its own
 * descriptor carrying @mem->attribute in addition to its original
 * attributes. @buf is written in place; 'new' advances one descriptor at
 * a time, growing past 'old' whenever a split emits extra entries.
 */
void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
			      struct efi_mem_range *mem)
{
	u64 m_start, m_end, m_attr;
	efi_memory_desc_t *md;
	u64 start, end;
	void *old, *new;

	/* range to carve out, inclusive of both endpoints */
	m_start = mem->range.start;
	m_end = mem->range.end;
	m_attr = mem->attribute;

	/*
	 * The EFI memory map deals with regions in EFI_PAGE_SIZE units.
	 * A range whose boundaries are not page-aligned cannot be
	 * represented as descriptors, so refuse it loudly.
	 */
	if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
	    !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
		WARN_ON(1);
		return;
	}

	for (old = old_memmap->map, new = buf;
	     old < old_memmap->map_end;
	     old += old_memmap->desc_size, new += old_memmap->desc_size) {

		/* copy original EFI memory descriptor */
		memcpy(new, old, old_memmap->desc_size);
		md = new;
		start = md->phys_addr;
		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;

		/* descriptor fully inside the range: just tag it */
		if (m_start <= start && end <= m_end)
			md->attribute |= m_attr;

		/* range covers the head of the descriptor: split in two */
		if (m_start <= start &&
		    (start < m_end && m_end < end)) {
			/* first part — the overlapped head, tagged */
			md->attribute |= m_attr;
			md->num_pages = (m_end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			/* latter part — the untouched tail */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
		}

		/* range strictly inside the descriptor: split in three */
		if ((start < m_start && m_start < end) && m_end < end) {
			/* first part — untouched head */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* middle part — the overlapped span, tagged */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->attribute |= m_attr;
			md->phys_addr = m_start;
			md->num_pages = (m_end - m_start + 1) >>
				EFI_PAGE_SHIFT;
			/* last part — untouched tail */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - m_end) >>
				EFI_PAGE_SHIFT;
		}

		/* range covers the tail of the descriptor: split in two */
		if ((start < m_start && m_start < end) &&
		    (end <= m_end)) {
			/* first part — untouched head */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* latter part — the overlapped tail, tagged */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_start;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			md->attribute |= m_attr;
		}
	}
}
346