/*
 * Process creation support for Hexagon
 */
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/tick.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/tracehook.h>
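
/*
 * Program thread launch.  Often defined as a macro in processor.h,
 * but we're shooting for a small footprint and it's not an inner-loop
 * performance-critical operation.
 *
 * The Hexagon ABI specifies that R28 is zero'ed before program launch,
 * which the memset below takes care of automatically.
 */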
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	/* Set the user address-space limit for this thread */
	set_fs(USER_DS);

	/* Zero all register state, then fill in the non-zero pieces */
	memset(regs, 0, sizeof(*regs));

	pt_set_usermode(regs);
	pt_set_elr(regs, pc);
	pt_set_rte_sp(regs, sp);
}
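
/*
 * Idle the CPU with a hardware or virtual-machine wait instruction;
 * wait termination is offered even though interrupts are disabled.
 */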
void arch_cpu_idle(void)
{
	__vmwait();
	/* interrupts wake us up, but irqs are still disabled */
	local_irq_enable();
}
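
/*
 * Return saved PC of a blocked thread
 */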
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	/* Not implemented on this architecture */
	return 0;
}
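
/*
 * Copy architecture-specific thread state
 */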
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct hexagon_switch_stack *ss;
	struct pt_regs *childregs;
	asmlinkage void ret_from_fork(void);

	/* The child's saved register state sits at the top of its stack */
	childregs = (struct pt_regs *) (((unsigned long) ti + THREAD_SIZE) -
					sizeof(*childregs));

	ti->regs = childregs;
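
	/*
	 * Establish the switch frame just below the saved register
	 * state; switch_to() unwinds it and returns through
	 * ret_from_fork via the saved link register.
	 */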
	ss = (struct hexagon_switch_stack *) ((unsigned long) childregs -
					      sizeof(*ss));
	ss->lr = (unsigned long)ret_from_fork;
	p->thread.switch_sp = ss;
	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(childregs, 0, sizeof(struct pt_regs));
		/* r24 <- fn, r25 <- arg */
		ss->r24 = usp;
		ss->r25 = arg;
		pt_set_kmode(childregs);
		return 0;
	}
	memcpy(childregs, current_pt_regs(), sizeof(*childregs));
	ss->r2524 = 0;

	if (usp)
		pt_set_rte_sp(childregs, usp);

	/* Child sees zero return value */
	childregs->r00 = 0;
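
	/*
	 * The clone syscall has the C signature:
	 * int [r0] clone(int flags [r0],
	 *           void *child_frame [r1],
	 *           void *parent_tid [r2],
	 *           void *child_tid [r3],
	 *           void *thread_control_block [r4]);
	 * ugp is used to provide TLS support.
	 */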
	if (clone_flags & CLONE_SETTLS)
		childregs->ugp = childregs->r04;
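
	/*
	 * Parent sees new pid -- not necessary, not even possible at
	 * this point in the fork process
	 */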
	return 0;
}
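
/*
 * Release any architecture-specific resources locked by the thread
 */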
void release_thread(struct task_struct *dead_task)
{
}
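
/*
 * Free any architecture-specific thread data structures, etc.
 */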
void exit_thread(void)
{
}
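
/*
 * Some archs flush debug and FPU info here
 */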
void flush_thread(void)
{
}
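
/*
 * The "wait channel" terminology is archaic, but what we want
 * is an identification of the point at which the scheduler
 * was invoked by a blocked thread.
 */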
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, pc;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)task_stack_page(p);
	fp = ((struct hexagon_switch_stack *)p->thread.switch_sp)->fp;
	do {
		/* Give up if the frame pointer leaves the thread's stack */
		if (fp < (stack_page + sizeof(struct thread_info)) ||
			fp >= (THREAD_SIZE - 8 + stack_page))
			return 0;
		/* Saved LR sits one word above the saved FP in each frame */
		pc = ((unsigned long *)fp)[1];
		if (!in_sched_functions(pc))
			return pc;
		fp = *(unsigned long *) fp;
	} while (count++ < 16);

	return 0;
}
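
/*
 * Required placeholder.
 */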
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	return 0;
}
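
/*
 * Called on the exit path of event entry code.
 * Interrupts will already be disabled.
 *
 * Returns 0 if there's no need to re-check for more work.
 */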
int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
{
	if (!(thread_info_flags & _TIF_WORK_MASK)) {
		return 0;
	}  /* shortcut -- no work to be done */

	local_irq_enable();

	if (thread_info_flags & _TIF_NEED_RESCHED) {
		schedule();
		return 1;
	}

	if (thread_info_flags & _TIF_SIGPENDING) {
		do_signal(regs);
		return 1;
	}

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		return 1;
	}

	/* Should not even reach here */
	panic("%s: bad thread_info flags 0x%08x\n", __func__,
		thread_info_flags);
}