/*P:600
 * The x86 architecture has segments, which involve a table of descriptors
 * which can be used to do funky things with virtual address interpretation.
 * We originally used to use segments so the Guest couldn't alter the
 * Guest<->Host Switcher, and then we had to trim Guest segments, and restore
 * for userspace per-thread segments, but trim again on userspace->kernel
 * transitions... This nightmarish creation was contained within this file,
 * where we knew not to tread without heavy armament and a change of underwear.
 *
 * In these modern times, the segment handling code consists of simple sanity
 * checks, and the worst you'll experience reading this code is butterfly-rash
 * from frolicking through its parklike serenity.
:*/
#include "lg.h"

/*H:600
 * Segments & The Global Descriptor Table
 *
 * (That title sounds like a bad Nerdcore group. Not to suggest that there are
 * any good Nerdcore groups, but in high school a friend of mine had a band
 * called Joe Fish and the Chips, so there are definitely worse band names).
 *
 * To refresh: the GDT is a table of 8-byte values describing segments. Once
 * set up, these segments can be loaded into one of the 6 "segment registers".
 *
 * GDT entries are passed around as "struct desc_struct"s, which like IDT
 * entries are split into two 32-bit members, "a" and "b". One day, someone
 * will clean that up, and be declared a Hero. (No pressure, I'm just saying).
 *
 * Anyway, the GDT entry contains a base (the start address of the segment), a
 * limit (the size of the segment - 1), and some flags. Sounds simple, and it
 * would be, except those zany Intel engineers decided that it was too boring
 * to put the base at one end, the limit at the other, and the flags in
 * between. They decided to shotgun the bits at random throughout the 8 bytes,
 * like so:
 *
 * 0               16                     40       48  52  56     63
 * [ limit part 1 ][     base part 1     ][ flags ][li][fl][base   ]
 *                                                  mit ags  part 2
 *                                                 part 2
 *
 * As a result, this file contains a certain amount of magic numeracy. Let's
 * begin.
 */
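
/*
 * To make the diagram above concrete, here is a small standalone sketch (an
 * editorial illustration, not code used anywhere in lguest; the helper names
 * are made up) of how the base and limit can be pulled back out of the two
 * 32-bit halves "a" and "b":
 *
 *	u32 example_base(u32 a, u32 b)
 *	{
 *		return (a >> 16) | ((b & 0xFF) << 16) | (b & 0xFF000000);
 *	}
 *
 *	u32 example_limit(u32 a, u32 b)
 *	{
 *		return (a & 0xFFFF) | (b & 0x000F0000);
 *	}
 *
 * Bits 0-15 of "a" hold limit part 1, bits 16-31 of "a" plus bits 0-7 and
 * 24-31 of "b" hold the base, bits 16-19 of "b" hold limit part 2, and the
 * remaining bits of "b" are the flags.
 */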

/*
 * There are several entries we don't let the Guest set. The TSS entry is the
 * "Task State Segment" which controls all kinds of delicate things. The
 * LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and the
 * Guest can't be trusted to deal with double faults.
 */
static bool ignored_gdt(unsigned int num)
{
	return (num == GDT_ENTRY_TSS
		|| num == GDT_ENTRY_LGUEST_CS
		|| num == GDT_ENTRY_LGUEST_DS
		|| num == GDT_ENTRY_DOUBLEFAULT_TSS);
}

/*H:630
 * Once the Guest gives us new GDT entries, we fix them up a little. We
 * don't care if they're invalid: the worst that can happen is a General
 * Protection Fault in the Switcher when it restores a Guest segment register
 * which tries to use that entry. Then we kill the Guest for causing such a
 * mess: the message will be "unhandled trap 256".
 */
static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)
{
	unsigned int i;

	for (i = start; i < end; i++) {
		/*
		 * We never copy these ones to the real GDT, so we don't care
		 * what they say.
		 */
		if (ignored_gdt(i))
			continue;

		/*
		 * Segment descriptors contain a privilege level: the Guest is
		 * sometimes careless and leaves this as 0, even though it's
		 * running at privilege level 1. If so, we fix it here.
		 */
		if (cpu->arch.gdt[i].dpl == 0)
			cpu->arch.gdt[i].dpl |= GUEST_PL;

		/*
		 * Each descriptor has an "accessed" bit. If we don't set it
		 * now, the CPU will try to set it when the Guest first loads
		 * that entry into a segment register. But the GDT isn't
		 * writable by the Guest, so bad things can happen.
		 */
		cpu->arch.gdt[i].type |= 0x1;
	}
}

/*H:610
 * Like the IDT, we never simply use the GDT the Guest gives us. We keep
 * a GDT for each CPU, and copy across the Guest's entries each time we want to
 * run the Guest on that CPU.
 *
 * This routine is called at boot or modprobe time for each CPU to set up the
 * constant GDT entries: the ones which are the same no matter what Guest we're
 * running.
 */
void setup_default_gdt_entries(struct lguest_ro_state *state)
{
	struct desc_struct *gdt = state->guest_gdt;
	unsigned long tss = (unsigned long)&state->guest_tss;

	/* The Switcher segments are full 0-4G segments, privilege level 0 */
	gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
	gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;

	/*
	 * The TSS segment refers to the TSS entry for this particular CPU.
	 */
	gdt[GDT_ENTRY_TSS].a = 0;
	gdt[GDT_ENTRY_TSS].b = 0;

	gdt[GDT_ENTRY_TSS].limit0 = 0x67;
	gdt[GDT_ENTRY_TSS].base0 = tss & 0xFFFF;
	gdt[GDT_ENTRY_TSS].base1 = (tss >> 16) & 0xFF;
	gdt[GDT_ENTRY_TSS].base2 = tss >> 24;
	gdt[GDT_ENTRY_TSS].type = 0x9;	/* 32-bit TSS (available) */
	gdt[GDT_ENTRY_TSS].p = 0x1;	/* Entry is present */
	gdt[GDT_ENTRY_TSS].dpl = 0x0;	/* Privilege level 0 */
	gdt[GDT_ENTRY_TSS].s = 0x0;	/* system segment */
}
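
/*
 * A quick worked example of the TSS descriptor built above, with a made-up
 * address purely for illustration: if this CPU's guest_tss sat at 0xC1234567,
 * the three base fields would end up as
 *
 *	base0 = 0xC1234567 & 0xFFFF       = 0x4567  (base bits 0-15)
 *	base1 = (0xC1234567 >> 16) & 0xFF = 0x23    (base bits 16-23)
 *	base2 = 0xC1234567 >> 24          = 0xC1    (base bits 24-31)
 *
 * The limit of 0x67 is no accident either: a 32-bit hardware TSS is 0x68
 * (104) bytes, and a segment limit is always "size - 1".
 */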

/*
 * This routine sets up the initial Guest GDT for booting. All entries start
 * as 0 (unusable).
 */
void setup_guest_gdt(struct lg_cpu *cpu)
{
	/*
	 * Start with full 0-4G segments...except the Guest is allowed to use
	 * them, so set the privilege level appropriately in the flags.
	 */
	cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
	cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
	cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].dpl |= GUEST_PL;
	cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].dpl |= GUEST_PL;
}

/*H:650
 * An optimization of copy_gdt(), for just the three "thread-local storage"
 * entries.
 */
void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt)
{
	unsigned int i;

	for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++)
		gdt[i] = cpu->arch.gdt[i];
}

/*H:640
 * When the Guest is run on a different CPU, or the GDT entries have changed,
 * copy_gdt() is called to copy the Guest's GDT entries across to this CPU's
 * GDT.
 */
void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt)
{
	unsigned int i;

	/*
	 * The default entries from setup_default_gdt_entries() are not
	 * replaced. See ignored_gdt() above.
	 */
	for (i = 0; i < GDT_ENTRIES; i++)
		if (!ignored_gdt(i))
			gdt[i] = cpu->arch.gdt[i];
}

/*H:620
 * This is where the Guest asks us to load a new GDT entry
 * (LHCALL_LOAD_GDT_ENTRY). We tweak the entry and copy it in.
 */
void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
{
	/*
	 * We assume the Guest has the same number of GDT entries as the
	 * Host, otherwise we'd have to dynamically allocate the Guest GDT.
	 */
	if (num >= ARRAY_SIZE(cpu->arch.gdt)) {
		kill_guest(cpu, "too many gdt entries %i", num);
		return;
	}

	/* Set it up, then fix it. */
	cpu->arch.gdt[num].a = lo;
	cpu->arch.gdt[num].b = hi;
	fixup_gdt_table(cpu, num, num+1);
	/*
	 * Mark that the GDT changed so the core knows it has to copy it again,
	 * even if the Guest is run on the same CPU.
	 */
	cpu->changed |= CHANGED_GDT;
}

/*
 * This is the fast-track version for just changing the three TLS entries.
 * Remember that this happens on every context switch, so it's worth
 * optimizing. But wouldn't it be neater to have a single hypercall to cover
 * both cases?
 */
void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls)
{
	struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN];

	__lgread(cpu, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES);
	fixup_gdt_table(cpu, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1);
	/* Note that just the TLS entries have changed. */
	cpu->changed |= CHANGED_GDT_TLS;
}

/*H:660
 * With this, we have finished the Host.
 *
 * Five of the seven parts of our task are complete. You have made it through
 * the Bit of Despair (I think that's somewhere in the page table code,
 * myself).
 *
 * Next, we examine "make Switcher". It's short, but intense.
 */
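
/*
 * A closing footnote on the Guest's side of the two hypercalls handled above
 * (sketched from this file's handlers; "hcall" here just stands for whatever
 * hypercall wrapper the Guest uses and isn't meant as its exact signature):
 *
 *	A single descriptor goes up as its entry number plus its two 32-bit
 *	halves, which is exactly what load_guest_gdt_entry() expects:
 *		hcall(LHCALL_LOAD_GDT_ENTRY, num, desc.a, desc.b);
 *
 *	The three TLS descriptors go up as one guest-physical address, so
 *	guest_load_tls() can __lgread() all 3 * 8 = 24 bytes in one go:
 *		hcall(LHCALL_LOAD_TLS, __pa(tls_array));
 */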