#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/security.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
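
/*
 * Byte and word views of the vm86 register image: AL/AH alias the two
 * low bytes of eax; IP and SP are 16-bit views of eip and esp.
 */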
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))

#define VFLAGS	(*(unsigned short *)&(current->thread.vm86->veflags))
#define VEFLAGS	(current->thread.vm86->veflags)

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
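
/*
 * Leave vm86 mode: write the final vm86 register image back to the
 * user-supplied structure, restore the saved 32-bit kernel state and
 * return 'retval' in eax of the restored 32-bit register set.
 */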
void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{
	struct task_struct *tsk = current;
	struct vm86plus_struct __user *user;
	struct vm86 *vm86 = current->thread.vm86;
	long err = 0;
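
	/*
	 * Reached from the entry code with interrupts disabled; enable
	 * them before touching user memory.
	 */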
	local_irq_enable();

	if (!vm86 || !vm86->user_vm86) {
		pr_alert("no user_vm86: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
	user = vm86->user_vm86;

	if (!access_ok(user, vm86->vm86plus.is_vm86pus ?
		       sizeof(struct vm86plus_struct) :
		       sizeof(struct vm86_struct))) {
		pr_alert("could not access userspace vm86 info\n");
		do_exit(SIGSEGV);
	}
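
	/* Copy the vm86 register image back to the user structure. */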
	put_user_try {
		put_user_ex(regs->pt.bx, &user->regs.ebx);
		put_user_ex(regs->pt.cx, &user->regs.ecx);
		put_user_ex(regs->pt.dx, &user->regs.edx);
		put_user_ex(regs->pt.si, &user->regs.esi);
		put_user_ex(regs->pt.di, &user->regs.edi);
		put_user_ex(regs->pt.bp, &user->regs.ebp);
		put_user_ex(regs->pt.ax, &user->regs.eax);
		put_user_ex(regs->pt.ip, &user->regs.eip);
		put_user_ex(regs->pt.cs, &user->regs.cs);
		put_user_ex(regs->pt.flags, &user->regs.eflags);
		put_user_ex(regs->pt.sp, &user->regs.esp);
		put_user_ex(regs->pt.ss, &user->regs.ss);
		put_user_ex(regs->es, &user->regs.es);
		put_user_ex(regs->ds, &user->regs.ds);
		put_user_ex(regs->fs, &user->regs.fs);
		put_user_ex(regs->gs, &user->regs.gs);

		put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
	} put_user_catch(err);
	if (err) {
		pr_alert("could not access userspace vm86 info\n");
		do_exit(SIGSEGV);
	}
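
	/* Restore the saved 32-bit kernel stack and sysenter state. */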
	preempt_disable();
	tsk->thread.sp0 = vm86->saved_sp0;
	tsk->thread.sysenter_cs = __KERNEL_CS;
	update_task_stack(tsk);
	refresh_sysenter_cs(&tsk->thread);
	vm86->saved_sp0 = 0;
	preempt_enable();

	memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));

	lazy_load_gs(vm86->regs32.gs);

	regs->pt.ax = retval;
}
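
/*
 * Write-protect the 32 pages of the VGA graphics window at 0xA0000 so
 * that screen writes by the vm86 task fault and can be tracked via the
 * screen bitmap (VM86_SCREEN_BITMAP).
 */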
static void mark_screen_rdonly(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i;

	down_write(&mm->mmap_sem);
	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	p4d = p4d_offset(pgd, 0xA0000);
	if (p4d_none_or_clear_bad(p4d))
		goto out;
	pud = pud_offset(p4d, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);

	if (pmd_trans_huge(*pmd)) {
		vma = find_vma(mm, 0xA0000);
		split_huge_pmd(vma, pmd, 0xA0000);
	}
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT, false);
}

static int do_vm86_irq_handling(int subfunction, int irqnumber);
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);
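
/* Legacy entry point: takes a bare vm86_struct, no vm86plus extensions. */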
SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
{
	return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
}

SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
	switch (cmd) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		return do_vm86_irq_handling(cmd, (int)arg);
	case VM86_PLUS_INSTALL_CHECK:
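		/*
		 * Probe subfunction: the old vm86() entry would have
		 * treated this value as an (invalid) vm86_struct pointer
		 * and failed, so returning 0 here is how userspace
		 * detects vm86plus support.
		 */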
		return 0;
	}

	/* Only VM86_ENTER and VM86_ENTER_NO_BYPASS reach this point. */
	return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
}
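
/*
 * Common entry for vm86old()/vm86(): capture the caller's 32-bit state,
 * load the user-supplied vm86 register image and return to userspace in
 * virtual-8086 mode.
 */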
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
{
	struct task_struct *tsk = current;
	struct vm86 *vm86 = tsk->thread.vm86;
	struct kernel_vm86_regs vm86regs;
	struct pt_regs *regs = current_pt_regs();
	unsigned long err = 0;

	err = security_mmap_addr(0);
	if (err) {
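		/*
		 * vm86 needs the low 1MB, including the zero page, mapped
		 * in the process address space; mmap_min_addr or LSM
		 * policy may forbid that, in which case vm86 is useless,
		 * so deny entry here.
		 */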
		pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d). Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n",
			     current->comm, task_pid_nr(current),
			     from_kuid_munged(&init_user_ns, current_uid()));
		return -EPERM;
	}

	if (!vm86) {
		if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
			return -ENOMEM;
		tsk->thread.vm86 = vm86;
	}
	if (vm86->saved_sp0)
		return -EPERM;

	if (!access_ok(user_vm86, plus ?
		       sizeof(struct vm86plus_struct) :
		       sizeof(struct vm86_struct)))
		return -EFAULT;
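
	/* Pull the register image and vm86 control fields out of user memory. */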
	memset(&vm86regs, 0, sizeof(vm86regs));
	get_user_try {
		unsigned short seg;
		get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx);
		get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx);
		get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx);
		get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi);
		get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi);
		get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp);
		get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax);
		get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip);
		get_user_ex(seg, &user_vm86->regs.cs);
		vm86regs.pt.cs = seg;
		get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags);
		get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp);
		get_user_ex(seg, &user_vm86->regs.ss);
		vm86regs.pt.ss = seg;
		get_user_ex(vm86regs.es, &user_vm86->regs.es);
		get_user_ex(vm86regs.ds, &user_vm86->regs.ds);
		get_user_ex(vm86regs.fs, &user_vm86->regs.fs);
		get_user_ex(vm86regs.gs, &user_vm86->regs.gs);

		get_user_ex(vm86->flags, &user_vm86->flags);
		get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap);
		get_user_ex(vm86->cpu_type, &user_vm86->cpu_type);
	} get_user_catch(err);
	if (err)
		return err;

	if (copy_from_user(&vm86->int_revectored,
			   &user_vm86->int_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (copy_from_user(&vm86->int21_revectored,
			   &user_vm86->int21_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (plus) {
		if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus,
				   sizeof(struct vm86plus_info_struct)))
			return -EFAULT;
		vm86->vm86plus.is_vm86pus = 1;
	} else
		memset(&vm86->vm86plus, 0,
		       sizeof(struct vm86plus_info_struct));

	memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
	vm86->user_vm86 = user_vm86;
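
	/*
	 * The user-supplied flags image cannot be trusted: keep only the
	 * SAFE_MASK bits from it, inherit the rest from the 32-bit flags,
	 * and force the VM bit on.
	 */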
	VEFLAGS = vm86regs.pt.flags;
	vm86regs.pt.flags &= SAFE_MASK;
	vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
	vm86regs.pt.flags |= X86_VM_MASK;

	vm86regs.pt.orig_ax = regs->orig_ax;

	switch (vm86->cpu_type) {
	case CPU_286:
		vm86->veflags_mask = 0;
		break;
	case CPU_386:
		vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}

	vm86->saved_sp0 = tsk->thread.sp0;
	lazy_save_gs(vm86->regs32.gs);
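
	/*
	 * Make room for the extra segment registers (es/ds/fs/gs) that
	 * the vm86 iret frame carries.
	 */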
	preempt_disable();
	tsk->thread.sp0 += 16;

	if (boot_cpu_has(X86_FEATURE_SEP)) {
		tsk->thread.sysenter_cs = 0;
		refresh_sysenter_cs(&tsk->thread);
	}

	update_task_stack(tsk);
	preempt_enable();

	if (vm86->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
	force_iret();
	return regs->ax;
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}
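
/*
 * Update the virtual flags from a value the vm86 task set: only the
 * SAFE_MASK bits reach the real flags image, and IF is routed through
 * the virtual interrupt flag (VIF) instead of the real one.
 */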
static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
}

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	return test_bit(nr, bitmap->__map);
}
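
/*
 * Stack helpers for the 16-bit vm86 stack: values are transferred one
 * byte at a time through put_user()/get_user(), in real-mode
 * little-endian layout.
 */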
#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})
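
/*
 * Emulate a real-mode software interrupt: push flags, cs and ip on the
 * vm86 stack and vector through the real-mode IVT entry at linear
 * address i << 2. Revectored interrupts and anything involving the BIOS
 * segment are bounced back to the monitor via save_v86_state().
 */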
static void do_int(struct kernel_vm86_regs *regs, int i,
		   unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;
	struct vm86 *vm86 = current->thread.vm86;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &vm86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	save_v86_state(regs, VM86_INTx + (i << 8));
}
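
/*
 * Handle a trap raised while in vm86 mode: a vm86plus monitor gets
 * traps 1 and 3 reported via save_v86_state() and the rest reflected
 * into the vm86 task as real-mode interrupts; plain vm86 only handles
 * the debug trap.
 */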
int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	struct vm86 *vm86 = current->thread.vm86;

	if (vm86->vm86plus.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1)) {
			save_v86_state(regs, VM86_TRAP + (trapno << 8));
			return 0;
		}
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1;
	current->thread.trap_nr = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP);
	return 0;
}
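
/*
 * Emulate the privileged flag-touching instructions (pushf, popf,
 * int xx, iret, cli, sti) that raise #GP while in vm86 mode, operating
 * on the virtual flags copy.
 */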
void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;
	struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;

#define CHECK_IF_IN_TRAP \
	if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66: data32 = 1; break;	/* 32-bit operand size */
		case 0x67: break;		/* 32-bit address size */
		case 0x2e: break;		/* CS override */
		case 0x3e: break;		/* DS override */
		case 0x26: break;		/* ES override */
		case 0x36: break;		/* SS override */
		case 0x65: break;		/* GS override */
		case 0x64: break;		/* FS override */
		case 0xf2: break;		/* repnz */
		case 0xf3: break;		/* rep */
		default: pref_done = 1;
		}
	} while (!pref_done);

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		goto vm86_fault_return;

	/* popf */
	case 0x9d:
	{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);

		goto check_vip;
	}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (vmpi->vm86dbg_active) {
			if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
				save_v86_state(regs, VM86_INTx + (intno << 8));
				return;
			}
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
	{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		goto check_vip;
	}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		goto vm86_fault_return;

	/*
	 * sti: note that a real STI enables interrupts only after the
	 * *next* instruction; this emulation makes them pending
	 * immediately instead, which is why the VIP check follows.
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		goto check_vip;

	default:
		save_v86_state(regs, VM86_UNKNOWN);
	}

	return;

check_vip:
	if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
	    (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
		save_v86_state(regs, VM86_STI);
		return;
	}

vm86_fault_return:
	if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
		save_v86_state(regs, VM86_PICRETURN);
		return;
	}
	if (orig_flags & X86_EFLAGS_TF)
		handle_vm86_trap(regs, 0, X86_TRAP_DB);
	return;

simulate_sigsegv:
	/*
	 * A fault while accessing the vm86 stack or code: report it to
	 * the monitor as VM86_UNKNOWN rather than oopsing; the userspace
	 * monitor is expected to sort it out.
	 */
	save_v86_state(regs, VM86_UNKNOWN);
}
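
/*
 * Hardware IRQ forwarding for vm86: a task may claim an ISA IRQ and
 * have its arrival delivered as a signal. The IRQ stays disabled until
 * the task collects it via VM86_GET_AND_RESET_IRQ.
 */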
#define VM86_IRQNAME "vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS (1 \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED))

static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * Keep the IRQ disabled until the task has collected it;
	 * get_and_reset_irq() re-enables it.
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}

static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
	case VM86_GET_AND_RESET_IRQ: {
		return get_and_reset_irq(irqnumber);
	}
	case VM86_GET_IRQ_BITS: {
		return irqbits;
	}
	case VM86_REQUEST_IRQ: {
		int sig = irqnumber >> 8;
		int irq = irqnumber & 255;
		if (!capable(CAP_SYS_ADMIN)) return -EPERM;
		if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
		if (invalid_vm86_irq(irq)) return -EPERM;
		if (vm86_irqs[irq].tsk) return -EPERM;
		ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
		if (ret) return ret;
		vm86_irqs[irq].sig = sig;
		vm86_irqs[irq].tsk = current;
		return irq;
	}
	case VM86_FREE_IRQ: {
		if (invalid_vm86_irq(irqnumber)) return -EPERM;
		if (!vm86_irqs[irqnumber].tsk) return 0;
		if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
		free_vm86_irq(irqnumber);
		return 0;
	}
	}
	return -EINVAL;
}