1
2
3
4#include <linux/sched.h>
5#include <linux/sched/task_stack.h>
6#include <linux/mm.h>
7#include <linux/ptrace.h>
8#include <asm/desc.h>
9#include <asm/mmu_context.h>
10
/*
 * Translate the IP in @regs into a linear address for @child, accounting
 * for the code segment base.  Returns -1L for a bogus LDT selector.
 */
unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
	unsigned long addr, seg;

	addr = regs->ip;
	seg = regs->cs & 0xffff;
	if (v8086_mode(regs)) {
		/* vm86: linear = (ip & 0xffff) + (cs << 4), real-mode style */
		addr = (addr & 0xffff) + (seg << 4);
		return addr;
	}

#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Only LDT code segments need a base lookup here; GDT segments are
	 * treated as zero-based (flat) and fall through to "return addr".
	 */
	if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
		struct desc_struct *desc;
		unsigned long base;

		/* Strip the TI and RPL bits to get the LDT table index. */
		seg >>= 3;

		/* The lock guards against the LDT being replaced/resized. */
		mutex_lock(&child->mm->context.lock);
		if (unlikely(!child->mm->context.ldt ||
			     seg >= child->mm->context.ldt->size))
			/* No LDT or selector out of range: bogus address. */
			addr = -1L;
		else {
			desc = &child->mm->context.ldt->entries[seg];
			base = get_desc_base(desc);

			/* 16-bit code segment: IP wraps at 64K. */
			if (!desc->d)
				addr &= 0xffff;
			addr += base;
		}
		mutex_unlock(&child->mm->context.lock);
	}
#endif

	return addr;
}
54
55static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
56{
57 int i, copied;
58 unsigned char opcode[15];
59 unsigned long addr = convert_ip_to_linear(child, regs);
60
61 copied = access_process_vm(child, addr, opcode, sizeof(opcode),
62 FOLL_FORCE);
63 for (i = 0; i < copied; i++) {
64 switch (opcode[i]) {
65
66 case 0x9d: case 0xcf:
67 return 1;
68
69
70
71
72 case 0x66: case 0x67:
73 continue;
74
75 case 0x26: case 0x2e:
76 case 0x36: case 0x3e:
77 case 0x64: case 0x65:
78 case 0xf0: case 0xf2: case 0xf3:
79 continue;
80
81#ifdef CONFIG_X86_64
82 case 0x40 ... 0x4f:
83 if (!user_64bit_mode(regs))
84
85 return 0;
86
87 continue;
88#endif
89
90
91
92
93
94
95
96
97
98
99 case 0x9c:
100 default:
101 return 0;
102 }
103 }
104 return 0;
105}
106
107
108
109
/*
 * Arrange for the child to trap after one instruction.  Returns nonzero
 * when we set EFLAGS.TF ourselves (so block-step via BTF may be layered
 * on top); returns 0 when user mode owns TF or is about to set it.
 */
static int enable_single_step(struct task_struct *child)
{
	struct pt_regs *regs = task_pt_regs(child);
	unsigned long oflags;

	/*
	 * If TIF_SINGLESTEP is already set, re-set TF in the saved regs
	 * before we sample oflags below.  NOTE(review): this looks like it
	 * handles the case where a step trapped inside a syscall entry and
	 * the debug handler cleared TF while leaving TIF_SINGLESTEP set —
	 * without this, the oflags sample below would wrongly conclude the
	 * user was not using TF.  Confirm against do_debug()/entry code.
	 */
	if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP)))
		regs->flags |= X86_EFLAGS_TF;

	/*
	 * Always mark the task as single-stepping; this flag is what the
	 * rest of the kernel checks for a stepping tracee.
	 */
	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	/* Sample the flags BEFORE we force TF, to see if user mode set it. */
	oflags = regs->flags;

	/* Set TF in the saved user registers so the next insn traps. */
	regs->flags |= X86_EFLAGS_TF;

	/*
	 * If the next instruction is popf/iret (it will load TF from the
	 * stack itself), then TF after that instruction is under user
	 * control, not ours: drop any claim of ownership and report 0.
	 */
	if (is_setting_trap_flag(child, regs)) {
		clear_tsk_thread_flag(child, TIF_FORCED_TF);
		return 0;
	}

	/*
	 * TF was already set before we touched it: either user mode set it
	 * (we don't own it, return 0) or a previous enable_single_step()
	 * did (TIF_FORCED_TF still records our ownership, return nonzero).
	 */
	if (oflags & X86_EFLAGS_TF)
		return test_tsk_thread_flag(child, TIF_FORCED_TF);

	/* We set TF ourselves: remember that so disable can clear it. */
	set_tsk_thread_flag(child, TIF_FORCED_TF);

	return 1;
}
165
166void set_task_blockstep(struct task_struct *task, bool on)
167{
168 unsigned long debugctl;
169
170
171
172
173
174
175
176
177
178
179
180 local_irq_disable();
181 debugctl = get_debugctlmsr();
182 if (on) {
183 debugctl |= DEBUGCTLMSR_BTF;
184 set_tsk_thread_flag(task, TIF_BLOCKSTEP);
185 } else {
186 debugctl &= ~DEBUGCTLMSR_BTF;
187 clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
188 }
189 if (task == current)
190 update_debugctlmsr(debugctl);
191 local_irq_enable();
192}
193
194
195
196
197static void enable_step(struct task_struct *child, bool block)
198{
199
200
201
202
203
204
205
206 if (enable_single_step(child) && block)
207 set_task_blockstep(child, true);
208 else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
209 set_task_blockstep(child, false);
210}
211
212void user_enable_single_step(struct task_struct *child)
213{
214 enable_step(child, 0);
215}
216
217void user_enable_block_step(struct task_struct *child)
218{
219 enable_step(child, 1);
220}
221
222void user_disable_single_step(struct task_struct *child)
223{
224
225
226
227 if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
228 set_task_blockstep(child, false);
229
230
231 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
232
233
234 if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))
235 task_pt_regs(child)->flags &= ~X86_EFLAGS_TF;
236}
237