/*
 * Process/thread creation and management for the Hexagon architecture.
 */
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/tick.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/tracehook.h>

/*
 * Program launch: set up a pristine register state so the new program
 * starts at 'pc' with its stack at 'sp' in user mode.
 */
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	/* Zero all data-holding registers so nothing leaks into the new program */
	memset(regs, 0, sizeof(*regs));

	pt_set_usermode(regs);
	pt_set_elr(regs, pc);
	pt_set_rte_sp(regs, sp);
}

/*
 * Idle loop: wait for an interrupt with __vmwait(), then re-enable
 * interrupts for the generic idle code.
 */
void arch_cpu_idle(void)
{
	__vmwait();
	local_irq_enable();
}

/*
 * Saved PC of a blocked thread.  Not meaningfully implemented here;
 * always reports 0.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return 0;
}

/*
 * Set up the thread state of a newly forked/cloned task, including
 * kernel threads, so it can be scheduled and resume in ret_from_fork.
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct hexagon_switch_stack *ss;
	struct pt_regs *childregs;
	asmlinkage void ret_from_fork(void);

	/* The child's pt_regs sit at the top of its kernel stack */
	childregs = (struct pt_regs *) (((unsigned long) ti + THREAD_SIZE) -
					sizeof(*childregs));

	ti->regs = childregs;

	/*
	 * The switch stack lives immediately below pt_regs; the new
	 * thread resumes at ret_from_fork when first switched to.
	 */
	ss = (struct hexagon_switch_stack *) ((unsigned long) childregs -
					      sizeof(*ss));
	ss->lr = (unsigned long)ret_from_fork;
	p->thread.switch_sp = ss;

	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(childregs, 0, sizeof(struct pt_regs));
		/* Kernel thread: r24 holds the thread function, r25 its argument */
		ss->r24 = usp;
		ss->r25 = arg;
		pt_set_kmode(childregs);
		return 0;
	}

	/* User thread: start from a copy of the parent's register state */
	memcpy(childregs, current_pt_regs(), sizeof(*childregs));
	ss->r2524 = 0;

	if (usp)
		pt_set_rte_sp(childregs, usp);

	/* The child sees a return value of zero from fork/clone */
	childregs->r00 = 0;

	/*
	 * clone() passes the new thread's TLS pointer in r4; ugp is the
	 * user general pointer register used for thread-local storage.
	 */
	if (clone_flags & CLONE_SETTLS)
		childregs->ugp = childregs->r04;

	return 0;
}

/* Nothing architecture-specific to release when a task dies. */
void release_thread(struct task_struct *dead_task)
{
}

/* Nothing architecture-specific to free at thread exit. */
void exit_thread(void)
{
}

/* Called on exec; no per-thread hardware state needs flushing. */
void flush_thread(void)
{
}

/*
 * Find the "wait channel" of a sleeping task: walk its saved frame
 * pointers and report the first return address outside the scheduler.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, pc;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)task_stack_page(p);
	fp = ((struct hexagon_switch_stack *)p->thread.switch_sp)->fp;
	do {
		/* Give up if the frame pointer wanders off the kernel stack */
		if (fp < (stack_page + sizeof(struct thread_info)) ||
		    fp >= (THREAD_SIZE - 8 + stack_page))
			return 0;
		pc = ((unsigned long *)fp)[1];
		if (!in_sched_functions(pc))
			return pc;
		fp = *(unsigned long *) fp;
	} while (count++ < 16);

	return 0;
}

/* Required for ELF core dumps; Hexagon has no FPU state to dump. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	return 0;
}

/*
 * Handle pending work flags on the return-to-userspace path.  Called
 * with interrupts disabled; re-enables them before doing any work.
 * Returns nonzero when some work was handled, so the caller re-checks
 * the flags before finally returning to user mode.
 */
int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
{
	if (!(thread_info_flags & _TIF_WORK_MASK))
		return 0;	/* shortcut: no work pending */

	local_irq_enable();

	if (thread_info_flags & _TIF_NEED_RESCHED) {
		schedule();
		return 1;
	}

	if (thread_info_flags & _TIF_SIGPENDING) {
		do_signal(regs);
		return 1;
	}

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		return 1;
	}

	/* An unexpected flag in _TIF_WORK_MASK means a bug somewhere */
	panic("%s: bad thread_info flags 0x%08x\n", __func__,
	      thread_info_flags);
}