linux/arch/x86/boot/compressed/misc.c
/*
 * misc.c
 *
 * This is a collection of several routines from gzip-1.0.3
 * adapted for Linux.
 *
 * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
 * puts by Nick Holloway 1993, better puts by Martin Mares 1995
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */

#include "misc.h"
#include "../string.h"

/* WARNING!!
 * This code is compiled with -fPIC and it is relocated dynamically
 * at run time, but no relocation processing is performed.
 * This means that it is not safe to place pointers in static structures.
 */

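/*
 * For example, a hypothetical initializer such as
 *
 *    static char *bad_ptr = some_static_buffer;
 *
 * would bake the link-time address of some_static_buffer into the image;
 * once the code has been moved that address is stale, so such pointers
 * have to be computed at run time instead.
 */
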
/*
 * Getting to provably safe in-place decompression is hard. Worst case
 * behaviours need to be analyzed. Background information:
 *
 * The file layout is:
 *    magic[2]
 *    method[1]
 *    flags[1]
 *    timestamp[4]
 *    extraflags[1]
 *    os[1]
 *    compressed data blocks[N]
 *    crc[4] orig_len[4]
 *
 * resulting in 18 bytes of non-compressed data overhead.
 *
 * Files are divided into blocks:
 * 1 bit (last block flag)
 * 2 bits (block type)
 *
 * A block occurs every 32K - 1 bytes or when 50% compression
 * has been achieved. The smallest block type encoding is always used.
 *
 * stored:
 *    32 bits length in bytes.
 *
 * fixed:
 *    magic fixed tree.
 *    symbols.
 *
 * dynamic:
 *    dynamic tree encoding.
 *    symbols.
 *
 * The buffer for decompression in place is the length of the
 * uncompressed data, plus a small amount extra to keep the algorithm safe.
 * The compressed data is placed at the end of the buffer.  The output
 * pointer is placed at the start of the buffer and the input pointer
 * is placed where the compressed data starts.  Problems will occur
 * when the output pointer overruns the input pointer.
 *
 * The output pointer can only overrun the input pointer if the input
 * pointer is moving faster than the output pointer, a condition only
 * triggered by data whose compressed form is larger than the uncompressed
 * form.
 *
 * The worst case at the block level is a growth of the compressed data
 * of 5 bytes per 32767 bytes.
 *
 * The worst case internal to a compressed block is very hard to figure out.
 * The worst case can at least be bounded by having one bit that represents
 * 32764 bytes and then all of the rest of the bytes representing the
 * very last byte.
 *
 * All of which is enough to compute an amount of extra data that is required
 * to be safe.  To avoid problems at the block level, allocating 5 extra bytes
 * per 32767 bytes of data is sufficient.  To avoid problems internal to a
 * block, adding an extra 32767 bytes (the worst case uncompressed block size)
 * is sufficient, to ensure that in the worst case the decompressed data for
 * a block will stop the byte before the compressed data for a block begins.
 * To avoid problems with the compressed data's meta information an extra 18
 * bytes are needed.  Leading to the formula:
 *
 * extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size.
 *
 * Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
 * Adding 32768 instead of 32767 just makes for round numbers.
 * Adding the decompressor_size is necessary as it must live after all
 * of the data as well.  Last I measured the decompressor is about 14K.
 * 10K of actual data and 4K of bss.
 */
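
/*
 * As a worked example, assuming a 20 MB (0x1400000 byte) decompressed
 * image and the roughly 14K (14336 byte) decompressor quoted above:
 *
 *   extra_bytes = (0x1400000 >> 12) + 32768 + 18 + 14336
 *               = 5120 + 32768 + 18 + 14336
 *               = 52242 bytes, i.e. about 51K of slack.
 *
 * The same calculation as a macro, with illustrative names only (nothing
 * below uses them; the decompressor size is just the rough figure quoted
 * above):
 */
#define EXAMPLE_DECOMPRESSOR_SIZE       (14 * 1024)
#define EXAMPLE_EXTRA_BYTES(usize) \
        (((usize) >> 12) + 32768 + 18 + EXAMPLE_DECOMPRESSOR_SIZE)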

/*
 * gzip declarations
 */
#define STATIC          static

#undef memcpy

/*
 * Use a normal definition of memset() from string.c. There are already
 * included header files which expect a definition of memset(), and by
 * the time we define a memset macro it is too late.
 */
#undef memset
#define memzero(s, n)   memset((s), 0, (n))


static void error(char *m);

/*
 * This is set up by the setup-routine at boot-time
 */
struct boot_params *real_mode;          /* Pointer to real-mode data */

memptr free_mem_ptr;
memptr free_mem_end_ptr;

static char *vidmem;
static int vidport;
static int lines, cols;

#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif

#ifdef CONFIG_KERNEL_BZIP2
#include "../../../../lib/decompress_bunzip2.c"
#endif

#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif

#ifdef CONFIG_KERNEL_XZ
#include "../../../../lib/decompress_unxz.c"
#endif

#ifdef CONFIG_KERNEL_LZO
#include "../../../../lib/decompress_unlzo.c"
#endif

#ifdef CONFIG_KERNEL_LZ4
#include "../../../../lib/decompress_unlz4.c"
#endif
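
/*
 * Exactly one CONFIG_KERNEL_* compression option is selected at configure
 * time, so only one of the files above is pulled in; it is what supplies
 * the decompress() routine called from decompress_kernel() below.
 */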

static void scroll(void)
{
        int i;

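        /*
         * Each character cell in text-mode video memory is two bytes
         * (character + attribute), so a row is cols * 2 bytes.  Move rows
         * 1..lines-1 up by one row, then blank the character bytes of the
         * last row.
         */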
        memcpy(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2);
        for (i = (lines - 1) * cols * 2; i < lines * cols * 2; i += 2)
                vidmem[i] = ' ';
}

#define XMTRDY          0x20    /* LSR bit: transmit holding register empty */

#define TXR             0       /*  Transmit register (WRITE) */
#define LSR             5       /*  Line Status               */
static void serial_putchar(int ch)
{
        unsigned timeout = 0xffff;

        while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
                cpu_relax();

        outb(ch, early_serial_base + TXR);
}

void __putstr(const char *s)
{
        int x, y, pos;
        char c;

        if (early_serial_base) {
                const char *str = s;
                while (*str) {
                        if (*str == '\n')
                                serial_putchar('\r');
                        serial_putchar(*str++);
                }
        }

        if (real_mode->screen_info.orig_video_mode == 0 &&
            lines == 0 && cols == 0)
                return;

        x = real_mode->screen_info.orig_x;
        y = real_mode->screen_info.orig_y;

        while ((c = *s++) != '\0') {
                if (c == '\n') {
                        x = 0;
                        if (++y >= lines) {
                                scroll();
                                y--;
                        }
                } else {
                        vidmem[(x + cols * y) * 2] = c;
                        if (++x >= cols) {
                                x = 0;
                                if (++y >= lines) {
                                        scroll();
                                        y--;
                                }
                        }
                }
        }

        real_mode->screen_info.orig_x = x;
        real_mode->screen_info.orig_y = y;

        pos = (x + cols * y) * 2;       /* Update cursor position */
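        /*
         * The CRT controller's cursor location registers (indexes 14 and 15)
         * take a character offset, i.e. the byte offset divided by two:
         * pos >> 9 is the high byte of pos / 2 and pos >> 1 is the low byte.
         */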
        outb(14, vidport);
        outb(0xff & (pos >> 9), vidport+1);
        outb(15, vidport);
        outb(0xff & (pos >> 1), vidport+1);
}

static void error(char *x)
{
        error_putstr("\n\n");
        error_putstr(x);
        error_putstr("\n\n -- System halted");

        while (1)
                asm("hlt");
}

#if CONFIG_X86_NEED_RELOCS
static void handle_relocations(void *output, unsigned long output_len)
{
        int *reloc;
        unsigned long delta, map, ptr;
        unsigned long min_addr = (unsigned long)output;
        unsigned long max_addr = min_addr + output_len;

        /*
         * Calculate the delta between where vmlinux was linked to load
         * and where it was actually loaded.
         */
        delta = min_addr - LOAD_PHYSICAL_ADDR;
        if (!delta) {
                debug_putstr("No relocation needed... ");
                return;
        }
        debug_putstr("Performing relocations... ");

        /*
         * The kernel contains a table of relocation addresses. Those
         * addresses have the final load address of the kernel in virtual
         * memory. We are currently working in the self map. So we need to
         * create an adjustment for kernel memory addresses to the self map.
         * This will involve subtracting out the base address of the kernel.
         */
        map = delta - __START_KERNEL_map;

        /*
         * Process relocations: 32 bit relocations first, then 64 bit after.
         * Three sets of binary relocations are added to the end of the kernel
         * before compression. Each relocation table entry is the kernel
         * address of the location which needs to be updated, stored as a
         * 32-bit value which is sign extended to 64 bits.
         *
         * Format is:
         *
         * kernel bits...
         * 0 - zero terminator for 64 bit relocations
         * 64 bit relocation repeated
         * 0 - zero terminator for inverse 32 bit relocations
         * 32 bit inverse relocation repeated
         * 0 - zero terminator for 32 bit relocations
         * 32 bit relocation repeated
         *
         * So we work backwards from the end of the decompressed image.
         */
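        /*
         * Walking backwards from the end: the first run of entries (up to a
         * zero) is the plain 32-bit relocations, the next run is the inverse
         * 32-bit relocations, and the last run is the 64-bit relocations.
         */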
        for (reloc = output + output_len - sizeof(*reloc); *reloc; reloc--) {
                int extended = *reloc;
                extended += map;

                ptr = (unsigned long)extended;
                if (ptr < min_addr || ptr > max_addr)
                        error("32-bit relocation outside of kernel!\n");

                *(uint32_t *)ptr += delta;
        }
#ifdef CONFIG_X86_64
        while (*--reloc) {
                long extended = *reloc;
                extended += map;

                ptr = (unsigned long)extended;
                if (ptr < min_addr || ptr > max_addr)
                        error("inverse 32-bit relocation outside of kernel!\n");

                *(int32_t *)ptr -= delta;
        }
        for (reloc--; *reloc; reloc--) {
                long extended = *reloc;
                extended += map;

                ptr = (unsigned long)extended;
                if (ptr < min_addr || ptr > max_addr)
                        error("64-bit relocation outside of kernel!\n");

                *(uint64_t *)ptr += delta;
        }
#endif
}
#else
static inline void handle_relocations(void *output, unsigned long output_len)
{ }
#endif

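/*
 * The decompressed image is an ELF vmlinux: copy each PT_LOAD segment to
 * its link-time offset within the output buffer (or, for non-relocatable
 * kernels, to its physical load address).
 */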
static void parse_elf(void *output)
{
#ifdef CONFIG_X86_64
        Elf64_Ehdr ehdr;
        Elf64_Phdr *phdrs, *phdr;
#else
        Elf32_Ehdr ehdr;
        Elf32_Phdr *phdrs, *phdr;
#endif
        void *dest;
        int i;

        memcpy(&ehdr, output, sizeof(ehdr));
        if (ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
           ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
           ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
           ehdr.e_ident[EI_MAG3] != ELFMAG3) {
                error("Kernel is not a valid ELF file");
                return;
        }

        debug_putstr("Parsing ELF... ");

        phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum);
        if (!phdrs)
                error("Failed to allocate space for phdrs");

        memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum);

        for (i = 0; i < ehdr.e_phnum; i++) {
                phdr = &phdrs[i];

                switch (phdr->p_type) {
                case PT_LOAD:
#ifdef CONFIG_RELOCATABLE
                        dest = output;
                        dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
#else
                        dest = (void *)(phdr->p_paddr);
#endif
                        memcpy(dest,
                               output + phdr->p_offset,
                               phdr->p_filesz);
                        break;
                default: /* Ignore other PT_* */ break;
                }
        }

        free(phdrs);
}

asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
                                  unsigned char *input_data,
                                  unsigned long input_len,
                                  unsigned char *output,
                                  unsigned long output_len,
                                  unsigned long run_size)
{
        unsigned char *output_orig = output;

        real_mode = rmode;

        sanitize_boot_params(real_mode);

        if (real_mode->screen_info.orig_video_mode == 7) {
                vidmem = (char *) 0xb0000;
                vidport = 0x3b4;
        } else {
                vidmem = (char *) 0xb8000;
                vidport = 0x3d4;
        }

        lines = real_mode->screen_info.orig_video_lines;
        cols = real_mode->screen_info.orig_video_cols;

        console_init();
        debug_putstr("early console in decompress_kernel\n");

        free_mem_ptr     = heap;        /* Heap */
        free_mem_end_ptr = heap + BOOT_HEAP_SIZE;

        /*
         * The memory hole needed for the kernel is the larger of either
         * the entire decompressed kernel plus relocation table, or the
         * entire decompressed kernel plus .bss and .brk sections.
         */
        output = choose_kernel_location(input_data, input_len, output,
                                        output_len > run_size ? output_len
                                                              : run_size);

        /* Validate memory location choices. */
        if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1))
                error("Destination address inappropriately aligned");
#ifdef CONFIG_X86_64
        if (heap > 0x3fffffffffffUL)
                error("Destination address too large");
#else
        if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff))
                error("Destination address too large");
#endif
#ifndef CONFIG_RELOCATABLE
        if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
                error("Wrong destination address");
#endif

        debug_putstr("\nDecompressing Linux... ");
        decompress(input_data, input_len, NULL, NULL, output, NULL, error);
        parse_elf(output);
        /*
         * 32-bit always performs relocations. 64-bit relocations are only
         * needed if kASLR has chosen a different load address.
         */
        if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
                handle_relocations(output, output_len);
        debug_putstr("done.\nBooting the kernel.\n");
        return output;
}