linux/arch/x86/include/asm/desc.h

#ifndef _ASM_X86_DESC_H
#define _ASM_X86_DESC_H

#include <asm/desc_defs.h>
#include <asm/ldt.h>
#include <asm/mmu.h>

#include <linux/smp.h>
#include <linux/percpu.h>

static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
{
        desc->limit0            = info->limit & 0x0ffff;

        desc->base0             = (info->base_addr & 0x0000ffff);
        desc->base1             = (info->base_addr & 0x00ff0000) >> 16;

        desc->type              = (info->read_exec_only ^ 1) << 1;
        desc->type             |= info->contents << 2;

        desc->s                 = 1;
        desc->dpl               = 0x3;
        desc->p                 = info->seg_not_present ^ 1;
        desc->limit             = (info->limit & 0xf0000) >> 16;
        desc->avl               = info->useable;
        desc->d                 = info->seg_32bit;
        desc->g                 = info->limit_in_pages;

        desc->base2             = (info->base_addr & 0xff000000) >> 24;
        /*
         * Don't allow setting of the lm bit. It would confuse
         * user_64bit_mode and would get overridden by sysret anyway.
         */
        desc->l                 = 0;
}
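
/*
 * Example (illustrative sketch, not part of the original header): a
 * flat 32-bit data segment as a modify_ldt()/set_thread_area() caller
 * might request it. The field values are assumptions chosen for
 * illustration.
 *
 *      struct user_desc info = {
 *              .entry_number    = -1,          // let the kernel pick a slot
 *              .base_addr       = 0,
 *              .limit           = 0xfffff,
 *              .seg_32bit       = 1,
 *              .contents        = 0,           // data, grows up
 *              .read_exec_only  = 0,           // writable
 *              .limit_in_pages  = 1,           // 4GiB with 4KiB granularity
 *              .seg_not_present = 0,
 *              .useable         = 1,
 *      };
 *      struct desc_struct desc;
 *
 *      fill_ldt(&desc, &info);         // desc now encodes a DPL-3 data segment
 */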

extern struct desc_ptr idt_descr;
extern gate_desc idt_table[];
extern struct desc_ptr nmi_idt_descr;
extern gate_desc nmi_idt_table[];

struct gdt_page {
        struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));

DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);

static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
{
        return per_cpu(gdt_page, cpu).gdt;
}
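
/*
 * Example (illustrative): walking a CPU's GDT, e.g. to reach the TLS
 * slots. The caller must keep the CPU from going away (preemption
 * disabled, or smp_processor_id() used from a non-preemptible context):
 *
 *      struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
 *      struct desc_struct *tls = &gdt[GDT_ENTRY_TLS_MIN];
 */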

#ifdef CONFIG_X86_64

static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
                             unsigned dpl, unsigned ist, unsigned seg)
{
        gate->offset_low        = PTR_LOW(func);
        gate->segment           = __KERNEL_CS;
        gate->ist               = ist;
        gate->p                 = 1;
        gate->dpl               = dpl;
        gate->zero0             = 0;
        gate->zero1             = 0;
        gate->type              = type;
        gate->offset_middle     = PTR_MIDDLE(func);
        gate->offset_high       = PTR_HIGH(func);
}
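
/*
 * Note that this 64-bit variant ignores its 'seg' argument: every gate
 * targets __KERNEL_CS. Example (illustrative; 'handler' and 'vector'
 * are placeholders):
 *
 *      gate_desc g;
 *
 *      pack_gate(&g, GATE_INTERRUPT, (unsigned long)handler, 0, 0,
 *                __KERNEL_CS);
 *      write_idt_entry(idt_table, vector, &g);
 */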

#else
static inline void pack_gate(gate_desc *gate, unsigned char type,
                             unsigned long base, unsigned dpl, unsigned flags,
                             unsigned short seg)
{
        gate->a = (seg << 16) | (base & 0xffff);
        gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
}

#endif

static inline int desc_empty(const void *ptr)
{
        const u32 *desc = ptr;

        return !(desc[0] | desc[1]);
}
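
/*
 * Example (illustrative): the TLS code uses this pattern to find an
 * unused descriptor slot ('idx' is a placeholder):
 *
 *      if (desc_empty(&t->tls_array[idx]))
 *              ;       // slot idx is free
 */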

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define load_TR_desc()                          native_load_tr_desc()
#define load_gdt(dtr)                           native_load_gdt(dtr)
#define load_idt(dtr)                           native_load_idt(dtr)
#define load_tr(tr)                             asm volatile("ltr %0"::"m" (tr))
#define load_ldt(ldt)                           asm volatile("lldt %0"::"m" (ldt))

#define store_gdt(dtr)                          native_store_gdt(dtr)
#define store_idt(dtr)                          native_store_idt(dtr)
#define store_tr(tr)                            (tr = native_store_tr())

#define load_TLS(t, cpu)                        native_load_tls(t, cpu)
#define set_ldt                                 native_set_ldt

#define write_ldt_entry(dt, entry, desc)        native_write_ldt_entry(dt, entry, desc)
#define write_gdt_entry(dt, entry, desc, type)  native_write_gdt_entry(dt, entry, desc, type)
#define write_idt_entry(dt, entry, g)           native_write_idt_entry(dt, entry, g)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
}
#endif  /* CONFIG_PARAVIRT */

#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))

static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
{
        memcpy(&idt[entry], gate, sizeof(*gate));
}

static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
{
        memcpy(&ldt[entry], desc, 8);
}

static inline void
native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
{
        unsigned int size;

        switch (type) {
        case DESC_TSS:  size = sizeof(tss_desc);        break;
        case DESC_LDT:  size = sizeof(ldt_desc);        break;
        default:        size = sizeof(*gdt);            break;
        }

        memcpy(&gdt[entry], desc, size);
}

static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
                                   unsigned long limit, unsigned char type,
                                   unsigned char flags)
{
        desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
        desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
                (limit & 0x000f0000) | ((type & 0xff) << 8) |
                ((flags & 0xf) << 20);
        desc->p = 1;
}
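
/*
 * Example (illustrative): this is exactly how the 32-bit branch of
 * set_tssldt_descriptor() below builds a present (0x80) TSS or LDT
 * descriptor in one call:
 *
 *      pack_descriptor(desc, addr, size, 0x80 | type, 0);
 */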

static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size)
{
#ifdef CONFIG_X86_64
        struct ldttss_desc64 *desc = d;

        memset(desc, 0, sizeof(*desc));

        desc->limit0            = size & 0xFFFF;
        desc->base0             = PTR_LOW(addr);
        desc->base1             = PTR_MIDDLE(addr) & 0xFF;
        desc->type              = type;
        desc->p                 = 1;
        desc->limit1            = (size >> 16) & 0xF;
        desc->base2             = (PTR_MIDDLE(addr) >> 8) & 0xFF;
        desc->base3             = PTR_HIGH(addr);
#else
        pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
#endif
}

static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
{
        struct desc_struct *d = get_cpu_gdt_table(cpu);
        tss_desc tss;

        /*
         * sizeof(unsigned long) comes from the extra "long" at the end
         * of the iobitmap. See the tss_struct definition in processor.h.
         *
         * The -1 is because base+limit must point at the last valid
         * byte of the segment, not one past the end.
         */
        set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
                              IO_BITMAP_OFFSET + IO_BITMAP_BYTES +
                              sizeof(unsigned long) - 1);
        write_gdt_entry(d, entry, &tss, DESC_TSS);
}

#define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
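
/*
 * Example (illustrative): CPU bringup installs the per-CPU TSS and
 * then loads the task register with it, roughly as cpu_init() does:
 *
 *      struct tss_struct *t = &per_cpu(init_tss, cpu);
 *
 *      set_tss_desc(cpu, t);
 *      load_TR_desc();
 */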

static inline void native_set_ldt(const void *addr, unsigned int entries)
{
        if (likely(entries == 0))
                asm volatile("lldt %w0"::"q" (0));
        else {
                unsigned cpu = smp_processor_id();
                ldt_desc ldt;

                set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
                                      entries * LDT_ENTRY_SIZE - 1);
                write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
                                &ldt, DESC_LDT);
                asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
        }
}

static inline void native_load_tr_desc(void)
{
        asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
}

static inline void native_load_gdt(const struct desc_ptr *dtr)
{
        asm volatile("lgdt %0"::"m" (*dtr));
}

static inline void native_load_idt(const struct desc_ptr *dtr)
{
        asm volatile("lidt %0"::"m" (*dtr));
}

static inline void native_store_gdt(struct desc_ptr *dtr)
{
        asm volatile("sgdt %0":"=m" (*dtr));
}

static inline void native_store_idt(struct desc_ptr *dtr)
{
        asm volatile("sidt %0":"=m" (*dtr));
}

static inline unsigned long native_store_tr(void)
{
        unsigned long tr;

        asm volatile("str %0":"=r" (tr));

        return tr;
}

static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
{
        struct desc_struct *gdt = get_cpu_gdt_table(cpu);
        unsigned int i;

        for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
                gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}
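
/*
 * Example (illustrative): the context-switch path copies the incoming
 * task's TLS descriptors into this CPU's GDT before segment registers
 * are reloaded, roughly as __switch_to() does:
 *
 *      load_TLS(&next_p->thread, cpu);
 */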

#define _LDT_empty(info)                                \
        ((info)->base_addr              == 0    &&      \
         (info)->limit                  == 0    &&      \
         (info)->contents               == 0    &&      \
         (info)->read_exec_only         == 1    &&      \
         (info)->seg_32bit              == 0    &&      \
         (info)->limit_in_pages         == 0    &&      \
         (info)->seg_not_present        == 1    &&      \
         (info)->useable                == 0)

#ifdef CONFIG_X86_64
#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
#else
#define LDT_empty(info) (_LDT_empty(info))
#endif
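
/*
 * Example (illustrative): an all-default user_desc is how userspace
 * asks for an entry to be cleared, so a writer can use the pattern
 *
 *      if (LDT_empty(&info))
 *              memset(&ldt, 0, sizeof(ldt));   // clear the slot
 *      else
 *              fill_ldt(&ldt, &info);
 */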

static inline void clear_LDT(void)
{
        set_ldt(NULL, 0);
}

/*
 * load one particular LDT into the current CPU
 */
static inline void load_LDT_nolock(mm_context_t *pc)
{
        set_ldt(pc->ldt, pc->size);
}

static inline void load_LDT(mm_context_t *pc)
{
        preempt_disable();
        load_LDT_nolock(pc);
        preempt_enable();
}
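
/*
 * Example (illustrative): switch_mm() already runs with preemption
 * disabled, so it can take the _nolock variant when the new mm brings
 * a different LDT:
 *
 *      if (unlikely(prev->context.ldt != next->context.ldt))
 *              load_LDT_nolock(&next->context);
 */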

static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
        return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}

static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
{
        desc->base0 = base & 0xffff;
        desc->base1 = (base >> 16) & 0xff;
        desc->base2 = (base >> 24) & 0xff;
}

static inline unsigned long get_desc_limit(const struct desc_struct *desc)
{
        return desc->limit0 | (desc->limit << 16);
}

static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
{
        desc->limit0 = limit & 0xffff;
        desc->limit = (limit >> 16) & 0xf;
}
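
/*
 * Example (illustrative): recovering a segment's linear range. The
 * returned limit is raw; it counts bytes or 4KiB pages depending on
 * the descriptor's granularity (g) bit:
 *
 *      unsigned long base  = get_desc_base(desc);
 *      unsigned long limit = get_desc_limit(desc);
 */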

#ifdef CONFIG_X86_64
static inline void set_nmi_gate(int gate, void *addr)
{
        gate_desc s;

        pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
        write_idt_entry(nmi_idt_table, gate, &s);
}
#endif
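
/*
 * Example (illustrative): trap_init() mirrors the NMI handler into
 * the special NMI IDT, roughly:
 *
 *      set_nmi_gate(2, &nmi);  // vector 2 is #NMI
 */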

static inline void _set_gate(int gate, unsigned type, void *addr,
                             unsigned dpl, unsigned ist, unsigned seg)
{
        gate_desc s;

        pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
        /*
         * does not need to be atomic because it is only done once at
         * setup time
         */
        write_idt_entry(idt_table, gate, &s);
}

/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
static inline void set_intr_gate(unsigned int n, void *addr)
{
        BUG_ON((unsigned)n > 0xFF);
        _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
}
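
/*
 * Example (illustrative): exception vectors are wired up this way at
 * boot, e.g. in trap_init():
 *
 *      set_intr_gate(0, &divide_error);        // #DE, vector 0
 */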

extern int first_system_vector;
/* used_vectors is a bitmap of vectors that are not managed by the per-CPU vector_irq */
extern unsigned long used_vectors[];

static inline void alloc_system_vector(int vector)
{
        if (!test_bit(vector, used_vectors)) {
                set_bit(vector, used_vectors);
                if (first_system_vector > vector)
                        first_system_vector = vector;
        } else {
                BUG();
        }
}

static inline void alloc_intr_gate(unsigned int n, void *addr)
{
        alloc_system_vector(n);
        set_intr_gate(n, addr);
}
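
/*
 * Example (illustrative): system vectors such as the reschedule IPI
 * are claimed and wired in one step, as the SMP interrupt init code
 * does:
 *
 *      alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 */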

/*
 * This routine sets up an interrupt gate at descriptor privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
        BUG_ON((unsigned)n > 0xFF);
        _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
}

static inline void set_system_trap_gate(unsigned int n, void *addr)
{
        BUG_ON((unsigned)n > 0xFF);
        _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
}

static inline void set_trap_gate(unsigned int n, void *addr)
{
        BUG_ON((unsigned)n > 0xFF);
        _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
}

static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
{
        BUG_ON((unsigned)n > 0xFF);
        _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
}

static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
{
        BUG_ON((unsigned)n > 0xFF);
        _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
}

static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
{
        BUG_ON((unsigned)n > 0xFF);
        _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
}
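
/*
 * Example (illustrative): userspace-reachable gates get DPL 3, and
 * critical faults get their own IST stack; trap_init() does roughly:
 *
 *      set_system_intr_gate(4, &overflow);     // INTO, usable from CPL 3
 *      set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
 */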

#endif /* _ASM_X86_DESC_H */