#ifndef __ASM_METAG_PROCESSOR_H
#define __ASM_METAG_PROCESSOR_H

#include <linux/atomic.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/metag_regs.h>

/*
 * Return the address of the instruction currently being executed, taken
 * from a local label placed at the point of expansion.
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l; })
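
/*
 * Usage sketch (illustrative only): the macro evaluates to the address of
 * a label placed where it is expanded, so the caller gets a rough "where
 * am I executing" pointer:
 *
 *	void *pc = current_text_addr();
 *	pr_debug("currently executing near %p\n", pc);
 */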

/* The user address space ends where the kernel mapping begins. */
#define TASK_SIZE	PAGE_OFFSET

/* Leave the top page of user space unused as padding above the stack. */
#define STACK_TOP	(TASK_SIZE - PAGE_SIZE)
#define STACK_TOP_MAX	STACK_TOP

/* Maximum virtual address space for the user stack, configured in MB. */
#define STACK_SIZE_MAX	(CONFIG_MAX_STACK_SIZE_MB*1024*1024)

/* Where the kernel starts searching for a free area during mmap(). */
#define TASK_UNMAPPED_BASE	META_MEMORY_BASE

typedef struct {
	unsigned long seg;
} mm_segment_t;

#ifdef CONFIG_METAG_FPU
struct meta_fpu_context {
	TBICTXEXTFPU fpstate;
	union {
		struct {
			TBICTXEXTBB4 fx8_15;
			TBICTXEXTFPACC fpacc;
		} fx8_15;
		struct {
			TBICTXEXTFPACC fpacc;
			TBICTXEXTBB4 unused;
		} nofx8_15;
	} extfpstate;
	bool needs_restore;
};
#else
struct meta_fpu_context {};
#endif

#ifdef CONFIG_METAG_DSP
struct meta_ext_context {
	struct {
		TBIEXTCTX ctx;
		TBICTXEXTBB8 bb8;
		TBIDUAL ax[TBICTXEXTAXX_BYTES / sizeof(TBIDUAL)];
		TBICTXEXTHL2 hl2;
		TBICTXEXTTDPR ext;
		TBICTXEXTRP6 rp;
	} regs;

	/* Save areas for the two banks of DSP RAM. */
	void *ram[2];

	/* Sizes of the DSP RAM save areas. */
	unsigned int ram_sz[2];
};
#else
struct meta_ext_context {};
#endif

struct thread_struct {
	PTBICTX kernel_context;
	/* Flags the user context was saved with (SaveMask). */
	unsigned int user_flags;
	struct meta_fpu_context *fpu_context;
	void __user *tls_ptr;
	unsigned short int_depth;
	unsigned short txdefr_failure;
	struct meta_ext_context *dsp_context;
};

#define INIT_THREAD { \
	NULL,		/* kernel_context */	\
	0,		/* user_flags */	\
	NULL,		/* fpu_context */	\
	NULL,		/* tls_ptr */		\
	1,		/* int_depth: we start in the kernel */	\
	0,		/* txdefr_failure */	\
	NULL,		/* dsp_context */	\
}
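
/*
 * For reference only: the positional initializer above maps onto the
 * thread_struct fields as the equivalent designated-initializer form
 * below (shown as a comment; the header keeps the positional style):
 *
 *	#define INIT_THREAD {			\
 *		.kernel_context	= NULL,		\
 *		.user_flags	= 0,		\
 *		.fpu_context	= NULL,		\
 *		.tls_ptr	= NULL,		\
 *		.int_depth	= 1,		\
 *		.txdefr_failure	= 0,		\
 *		.dsp_context	= NULL,		\
 *	}
 */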

/*
 * Set up the user registers to start executing a new program.  The new
 * user stack at bprm->exec is laid out as argc, the argv[] pointers, a
 * NULL terminator, then the envp[] pointers.  This must be a macro
 * because it refers to 'current' and 'bprm', which are only in scope at
 * the call site.
 */
#define start_thread(regs, pc, usp) do {				   \
	unsigned int *argc = (unsigned int *) bprm->exec;		   \
	current->thread.int_depth = 1;					   \
									   \
	regs->ctx.SaveMask = TBICTX_PRIV_BIT;				   \
	regs->ctx.CurrPC = pc;						   \
	regs->ctx.AX[0].U0 = usp;					   \
	regs->ctx.DX[3].U1 = *((int *)argc);			/* argc */ \
	regs->ctx.DX[3].U0 = (int)((int *)argc + 1);		/* argv */ \
	regs->ctx.DX[2].U1 = (int)((int *)argc +		/* envp */ \
				    regs->ctx.DX[3].U1 + 2);		   \
	regs->ctx.DX[2].U0 = 0;						   \
} while (0)
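
/*
 * Worked example (illustrative only): for a program invoked as "prog a b"
 * the word at bprm->exec is argc = 3, so start_thread() above loads:
 *
 *	regs->ctx.DX[3].U1 = 3			argc
 *	regs->ctx.DX[3].U0 = bprm->exec + 4	&argv[0]
 *	regs->ctx.DX[2].U1 = bprm->exec + 20	&envp[0] (past argv[0..2], NULL)
 *	regs->ctx.DX[2].U0 = 0
 */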

/* Forward declaration; the full definition is not needed here. */
struct task_struct;

/* Free any architecture-specific resources held by an exiting thread. */
static inline void release_thread(struct task_struct *dead_task)
{
}

#define copy_segments(tsk, mm)		do { } while (0)
#define release_segments(mm)		do { } while (0)

/* Return the saved PC/SP/FP of a blocked (not currently running) thread. */
#define thread_saved_pc(tsk)	\
	((unsigned long)(tsk)->thread.kernel_context->CurrPC)
#define thread_saved_sp(tsk)	\
	((unsigned long)(tsk)->thread.kernel_context->AX[0].U0)
#define thread_saved_fp(tsk)	\
	((unsigned long)(tsk)->thread.kernel_context->AX[1].U0)
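
/*
 * Usage sketch (illustrative only): the thread_saved_*() values are only
 * meaningful for a task that is not currently running, e.g. when dumping
 * the state of a sleeping task:
 *
 *	if (task != current)
 *		pr_debug("%s blocked at %08lx, sp %08lx\n", task->comm,
 *			 thread_saved_pc(task), thread_saved_sp(task));
 */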

unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)	(task_pt_regs(tsk)->ctx.CurrPC)
#define KSTK_ESP(tsk)	(task_pt_regs(tsk)->ctx.AX[0].U0)

#define user_stack_pointer(regs)	((regs)->ctx.AX[0].U0)

#define cpu_relax()			barrier()
#define cpu_relax_lowlatency()		cpu_relax()
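
/*
 * cpu_relax() is only a compiler barrier here, so a polling loop such as
 * the sketch below (with a hypothetical 'ready' flag, for illustration)
 * still forces the flag to be re-read on every iteration:
 *
 *	while (!READ_ONCE(ready))
 *		cpu_relax();
 */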

extern void setup_priv(void);

/* Read this hardware thread's number from the TXENABLE control register. */
static inline unsigned int hard_processor_id(void)
{
	unsigned int id;

	asm volatile ("MOV	%0, TXENABLE\n"
		      "AND	%0, %0, %1\n"
		      "LSR	%0, %0, %2\n"
		      : "=&d" (id)
		      : "I" (TXENABLE_THREAD_BITS),
			"K" (TXENABLE_THREAD_S)
		      );

	return id;
}
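
/*
 * Usage sketch (illustrative only): read which hardware thread we are
 * running on, e.g. for an early boot message before the normal CPU
 * numbering is set up:
 *
 *	pr_info("booting on hardware thread %u\n", hard_processor_id());
 */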

#define OP3_EXIT	0

#define HALT_OK		0
#define HALT_PANIC	-1

/*
 * Stop this hardware thread.  The exit operation and exit code are pushed
 * as arguments onto the stack and a SWITCH is issued so the exit can be
 * observed externally; the trailing branch keeps the thread spinning if
 * execution is ever resumed.
 */
static inline void hard_processor_halt(int exit_code)
{
	asm volatile ("MOV	D1Ar1, %0\n"
		      "MOV	D0Ar6, %1\n"
		      "MSETL	[A0StP],D0Ar6,D0Ar4,D0Ar2\n"
		      "1:\n"
		      "SWITCH	#0xC30006\n"
		      "B	1b\n"
		      : : "r" (exit_code), "K" (OP3_EXIT));
}
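
/*
 * Usage sketch (illustrative only): report a clean or failed shutdown;
 * the call does not return:
 *
 *	hard_processor_halt(HALT_OK);		normal halt
 *	hard_processor_halt(HALT_PANIC);	after a fatal error
 */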

/* Optional SoC-specific hooks for machine restart and halt. */
extern void (*soc_restart)(char *cmd);
extern void (*soc_halt)(void);
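
/*
 * A board file may install its own handlers for these hooks.  The sketch
 * below is hypothetical (myboard_restart and myboard_reset_reg are made
 * up for illustration):
 *
 *	static void myboard_restart(char *cmd)
 *	{
 *		writel(1, myboard_reset_reg);
 *	}
 *
 *	soc_restart = myboard_restart;
 */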

extern void show_trace(struct task_struct *tsk, unsigned long *sp,
		       struct pt_regs *regs);

extern const struct seq_operations cpuinfo_op;

#endif