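/*
 * Virtual-8086 mode support for 32-bit x86: entering vm86 mode via the
 * vm86old/vm86 system calls, saving and restoring the 32-bit state,
 * passing hardware interrupts to vm86 tasks, and emulating the
 * privileged instructions that fault while running in vm86 mode.
 */
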
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/syscalls.h>

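/*
 * The kernel_vm86_struct describing the vm86 task sits on the kernel
 * stack, starting at the saved register area: KVM86 simply recasts the
 * regs pointer, and VMPI names its vm86plus extension.
 */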
#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus

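/*
 * 8- and 16-bit register defines..
 */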
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))

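/*
 * virtual flags (16 and 32-bit versions)
 */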
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)

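/*
 * The layout of struct vm86_regs matches the pt_regs embedded in
 * struct kernel_vm86_regs up to orig_eax, so the copies below are done
 * in two chunks split at that offset; the second chunk also carries
 * the trailing real-mode segment registers.
 */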
static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
				  const struct kernel_vm86_regs *regs)
{
	int ret = 0;

	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
			    sizeof(struct kernel_vm86_regs) -
			    offsetof(struct kernel_vm86_regs, pt.orig_ax));

	return ret;
}

static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
				    const struct vm86_regs __user *user,
				    unsigned extra)
{
	int ret = 0;

	/* copy ax-fs inclusive */
	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
	/* copy orig_ax-end + extra */
	ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
			      sizeof(struct kernel_vm86_regs) -
			      offsetof(struct kernel_vm86_regs, pt.orig_ax) +
			      extra);
	return ret;
}

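/*
 * Leave vm86 mode: copy the final vm86 register state back to the
 * userspace vm86_info, restore the saved sp0/fs/gs, and hand back the
 * 32-bit pt_regs that were active when vm86 mode was entered.
 */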
struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
{
	struct tss_struct *tss;
	struct pt_regs *ret;
	unsigned long tmp;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!current->thread.vm86_info) {
		printk("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask);
	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
	if (tmp) {
		printk("vm86: could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(init_tss, get_cpu());
	current->thread.sp0 = current->thread.saved_sp0;
	current->thread.sysenter_cs = __KERNEL_CS;
	load_sp0(tss, &current->thread);
	current->thread.saved_sp0 = 0;
	put_cpu();

	ret = KVM86->regs32;

	ret->fs = current->thread.saved_fs;
	set_user_gs(ret, current->thread.saved_gs);

	return ret;
}

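/*
 * Write-protect the 32 pages covering the VGA window at 0xA0000 so
 * that screen writes fault; used to maintain the screen_bitmap.
 */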
static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	flush_tlb();
}


static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);

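/*
 * The classic vm86() entry point: %ebx holds a pointer to a
 * struct vm86_struct in userspace. A minimal caller (hypothetical
 * sketch, error handling omitted) looks roughly like:
 *
 *	struct vm86_struct v86 = { 0 };
 *	... fill in v86.regs with the real-mode cs:ip and ss:sp ...
 *	err = syscall(SYS_vm86old, &v86);
 *
 * The call "returns" to 32-bit mode whenever the vm86 task needs
 * attention (pending signal, unhandled fault); see save_v86_state().
 */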
int sys_vm86old(struct pt_regs *regs)
{
	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs->bx;
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting of stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret = -EPERM;

	tsk = current;
	if (tsk->thread.saved_sp0)
		goto out;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, vm86plus) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
	info.regs32 = regs;
	tsk->thread.vm86_info = v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}
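/*
 * The vm86plus entry point: %ebx selects a subfunction (irq handling,
 * install check, or VM86_ENTER*), and %ecx carries its argument: a
 * small int for the irq subfunctions, or a userspace pointer to a
 * struct vm86plus_struct when entering vm86 mode.
 */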
int sys_vm86(struct pt_regs *regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting of stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret;
	struct vm86plus_struct __user *v86;

	tsk = current;
	switch (regs->bx) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		ret = do_vm86_irq_handling(regs->bx, (int)regs->cx);
		goto out;
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: on old vm86 stuff this will return the error
		 *  from access_ok(), because the subfunction is
		 *  interpreted as (invalid) address to vm86_struct.
		 *  So the installation check works.
		 */
		ret = 0;
		goto out;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	ret = -EPERM;
	if (tsk->thread.saved_sp0)
		goto out;
	v86 = (struct vm86plus_struct __user *)regs->cx;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, regs32) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	info.regs32 = regs;
	info.vm86plus.is_vm86pus = 1;
	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}

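/*
 * Enter vm86 mode: sanitize the user-supplied segment registers and
 * flags, stash the 32-bit state in the thread struct, point sp0 at the
 * kernel_vm86_struct on our stack, and jump to resume_userspace. This
 * function never returns; the saved state is resurrected later by
 * save_v86_state().
 */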
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
	struct tss_struct *tss;
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
	info->regs.pt.ds = 0;
	info->regs.pt.es = 0;
	info->regs.pt.fs = 0;
#ifndef CONFIG_X86_32_LAZY_GS
	info->regs.pt.gs = 0;
#endif

/*
 * The flags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
	VEFLAGS = info->regs.pt.flags;
	info->regs.pt.flags &= SAFE_MASK;
	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
	info->regs.pt.flags |= X86_VM_MASK;

	switch (info->cpu_type) {
	case CPU_286:
		tsk->thread.v86mask = 0;
		break;
	case CPU_386:
		tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}

/*
 * Save old state, set default return value (%ax) to VM86_SIGNAL (0)
 */
	info->regs32->ax = VM86_SIGNAL;
	tsk->thread.saved_sp0 = tsk->thread.sp0;
	tsk->thread.saved_fs = info->regs32->fs;
	tsk->thread.saved_gs = get_user_gs(info->regs32);

	tss = &per_cpu(init_tss, get_cpu());
	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0;
	load_sp0(tss, &tsk->thread);
	put_cpu();

	tsk->thread.screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	/* call audit_syscall_exit since we do not exit via the normal paths */
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(0), 0);

	__asm__ __volatile__(
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
#ifdef CONFIG_X86_32_LAZY_GS
		"mov %2, %%gs\n\t"
#endif
		"jmp resume_userspace"
		: /* no outputs */
		:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
	/* we never return here */
}

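/*
 * Leave vm86 mode with the given return value in %eax of the saved
 * 32-bit register set; like do_sys_vm86() this jumps straight to
 * resume_userspace and never returns to its caller.
 */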
static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
{
	struct pt_regs *regs32;

	regs32 = save_v86_state(regs16);
	regs32->ax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: : "r" (regs32), "r" (current_thread_info()));
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
	if (VEFLAGS & X86_EFLAGS_VIP)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}
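
/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions; clear_IF(regs) is called in the opposite case so that
 * the sequence CLI PUSHF STI POPF ends up with interrupts disabled
 * again rather than enabled.
 */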
static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.v86mask);
}

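/*
 * Test bit nr in a revectored-interrupt bitmap: btl/sbbl turns the
 * carry flag into 0 or -1, so this returns nonzero iff the interrupt
 * has been revectored to the 32-bit monitor.
 */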
static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap), "r" (nr));
	return nr;
}

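/*
 * The vm86 stack and code live in userspace, so all pushes and pops
 * are done bytewise with put_user()/get_user(); any fault branches to
 * the caller-supplied error label instead of oopsing.
 */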
#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})
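
/*
 * Forward a real-mode software interrupt: unless the interrupt is
 * revectored (or points into the BIOS segment), push the flags and
 * return address on the vm86 stack and vector through the real-mode
 * IVT entry at address i*4. Anything we cannot handle is bounced to
 * 32-bit mode as VM86_INTx.
 */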
static void do_int(struct kernel_vm86_regs *regs, int i,
		   unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &KVM86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}

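/*
 * For vm86plus tasks, debug traps and breakpoints (traps 1 and 3) go
 * back to the 32-bit monitor as VM86_TRAP; everything else is fed to
 * the real-mode handler via do_int(). Plain vm86 tasks get a SIGTRAP
 * for trap 1 and leave the rest to the calling routine.
 */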
int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	if (VMPI.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1))
			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	current->thread.trap_no = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP, current);
	return 0;
}

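/*
 * Emulate the privileged instructions that cause a general protection
 * fault in vm86 mode: pushf, popf, int xx, iret, cli and sti, with
 * 0x66 operand-size prefixes respected. Anything else bails out to
 * the 32-bit monitor as VM86_UNKNOWN.
 */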
void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;

#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF
#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	if (orig_flags & X86_EFLAGS_TF) \
		handle_vm86_trap(regs, 0, 1); \
	return; } while (0)

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66:      /* 32-bit data */     data32 = 1; break;
		case 0x67:      /* 32-bit address */  break;
		case 0x2e:      /* CS */              break;
		case 0x3e:      /* DS */              break;
		case 0x26:      /* ES */              break;
		case 0x36:      /* SS */              break;
		case 0x65:      /* GS */              break;
		case 0x64:      /* FS */              break;
		case 0xf2:      /* repnz */           break;
		case 0xf3:      /* rep */             break;
		default: pref_done = 1;
		}
	} while (!pref_done);

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		VM86_FAULT_RETURN;

	/* popf */
	case 0x9d:
	{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);

		VM86_FAULT_RETURN;
	}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (VMPI.vm86dbg_active) {
			if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
	{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
	}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		VM86_FAULT_RETURN;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		VM86_FAULT_RETURN;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}

	return;

simulate_sigsegv:
	/*
	 * A fault while reading the vm86 code stream or accessing the
	 * vm86 stack cannot be turned into a precise SIGSEGV here, so
	 * the best we can do is bail out to 32-bit mode with
	 * VM86_UNKNOWN and let userspace deal with it.
	 */
	return_to_32bit(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME	"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \
	| (1 << SIGUNUSED))

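/*
 * A vm86 interrupt handler just records the IRQ in irqbits, signals
 * the owning task if it asked for a signal, and masks the IRQ until
 * the task collects it via VM86_GET_AND_RESET_IRQ.
 */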
static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}

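/*
 * Dispatch the vm86 irq subfunctions. For VM86_REQUEST_IRQ the
 * argument packs the requested signal in bits 8-15 and the IRQ number
 * in bits 0-7; requesting an IRQ needs CAP_SYS_ADMIN and a signal
 * from ALLOWED_SIGS (0 means "poll only, do not send a signal").
 */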
static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
	case VM86_GET_AND_RESET_IRQ: {
		return get_and_reset_irq(irqnumber);
	}
	case VM86_GET_IRQ_BITS: {
		return irqbits;
	}
	case VM86_REQUEST_IRQ: {
		int sig = irqnumber >> 8;
		int irq = irqnumber & 255;
		if (!capable(CAP_SYS_ADMIN)) return -EPERM;
		if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
		if (invalid_vm86_irq(irq)) return -EPERM;
		if (vm86_irqs[irq].tsk) return -EPERM;
		ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
		if (ret) return ret;
		vm86_irqs[irq].sig = sig;
		vm86_irqs[irq].tsk = current;
		return irq;
	}
	case VM86_FREE_IRQ: {
		if (invalid_vm86_irq(irqnumber)) return -EPERM;
		if (!vm86_irqs[irqnumber].tsk) return 0;
		if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
		free_vm86_irq(irqnumber);
		return 0;
	}
	}
	return -EINVAL;
}