#ifndef _ASM_TILE_PROCESSOR_H
#define _ASM_TILE_PROCESSOR_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/percpu.h>

#include <arch/chip.h>
#include <arch/spr_def.h>

struct task_struct;
struct thread_struct;

/* Address-space segment value, as manipulated by get_fs()/set_fs(). */
typedef struct {
	unsigned long seg;
} mm_segment_t;

/* Return the current instruction pointer ("program counter"). */
void *current_text_addr(void);

#if CHIP_HAS_TILE_DMA()

/* Capture the state of the DMA engine so it can be saved per-thread. */
struct tile_dma_state {
	int enabled;
	unsigned long src;
	unsigned long dest;
	unsigned long strides;
	unsigned long chunk_size;
	unsigned long src_chunk;
	unsigned long dest_chunk;
	unsigned long byte;
	unsigned long status;
};

/* Mask of the DMA status SPR selecting only the "running" and "done" bits. */
#define DMA_STATUS_MASK \
	(SPR_DMA_STATUS__RUNNING_MASK | SPR_DMA_STATUS__DONE_MASK)
#endif /* CHIP_HAS_TILE_DMA() */

/*
 * Track asynchronous TLB events (faults and access violations)
 * that occur while we are in kernel mode, e.g. from the DMA engine.
 */
struct async_tlb {
	short fault_num;	/* original fault number; 0 if none */
	char is_fault;		/* was it a fault (vs. an access violation)? */
	char is_write;		/* for faults: was it caused by a write? */
	unsigned long address;	/* address that faulted */
};

#ifdef CONFIG_HARDWALL
struct hardwall_info;
#endif

struct thread_struct {
	/* kernel stack pointer */
	unsigned long ksp;
	/* kernel PC */
	unsigned long pc;
	/* starting user stack pointer */
	unsigned long usp0;
	/* pid of the process that created this one */
	pid_t creator_pid;
#if CHIP_HAS_TILE_DMA()
	/* DMA state for suspended threads */
	struct tile_dma_state tile_dma_state;
#endif
	/* saved user EX_CONTEXT registers */
	unsigned long ex_context[2];
	/* saved user SYSTEM_SAVE registers */
	unsigned long system_save[4];
	/* saved user interrupt mask */
	unsigned long long interrupt_mask;
	/* saved user interrupt-control 0 state */
	unsigned long intctrl_0;
#if CHIP_HAS_PROC_STATUS_SPR()
	/* any other miscellaneous processor state bits */
	unsigned long proc_status;
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	/* interrupt vector base address */
	unsigned long interrupt_vector_base;
#endif
#if CHIP_HAS_TILE_RTF_HWM()
	/* tile cache retry FIFO high-water mark */
	unsigned long tile_rtf_hwm;
#endif
#if CHIP_HAS_DSTREAM_PF()
	/* data-stream prefetch control */
	unsigned long dstream_pf;
#endif
#ifdef CONFIG_HARDWALL
	/* hardwall this task is associated with, if any */
	struct hardwall_info *hardwall;
	/* links this task into the hardwall's task list */
	struct list_head hardwall_list;
#endif
#if CHIP_HAS_TILE_DMA()
	/* async TLB state for the DMA engine */
	struct async_tlb dma_async_tlb;
#endif
#if CHIP_HAS_SN_PROC()
	/* was the static network processor running when we were switched out? */
	int sn_proc_running;
	/* async TLB state for the static network processor */
	struct async_tlb sn_async_tlb;
#endif
};

#endif /* !__ASSEMBLY__ */

/*
 * Start the stack pointer this many bytes below the top of the stack
 * (see INIT_THREAD and KSTK_TOP below).
 */
#define STACK_TOP_DELTA 8

/*
 * The pt_regs for a task ends this many bytes below the top of its
 * kernel stack (see task_pt_regs() below).
 */
#ifdef __tilegx__
#define KSTK_PTREGS_GAP 48
#else
#define KSTK_PTREGS_GAP 56
#endif

#ifndef __ASSEMBLY__

/* Upper bound on the user address space. */
#ifdef __tilegx__
#define TASK_SIZE_MAX	(MEM_LOW_END + 1)
#else
#define TASK_SIZE_MAX	PAGE_OFFSET
#endif

/* TASK_SIZE is evaluated against "current", so compat tasks see a 32-bit limit. */
#ifdef CONFIG_COMPAT
#define COMPAT_TASK_SIZE	(1UL << 31)
#define TASK_SIZE	((current_thread_info()->status & TS_COMPAT) ? \
			 COMPAT_TASK_SIZE : TASK_SIZE_MAX)
#else
#define TASK_SIZE	TASK_SIZE_MAX
#endif

/* The vdso occupies the top page of the user address space. */
#define VDSO_BASE	(TASK_SIZE - PAGE_SIZE)

/* User stacks start just below the vdso page. */
#define STACK_TOP	VDSO_BASE

/* STACK_TOP_MAX is used during exec and does not depend on the compat check. */
#define STACK_TOP_MAX	TASK_SIZE_MAX

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's, if it is using bottom-up mapping.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#define INIT_THREAD { \
	.ksp = (unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA, \
	.interrupt_mask = -1ULL \
}

/* Per-cpu stack pointer and program counter recorded at boot time. */
DECLARE_PER_CPU(unsigned long, boot_sp);
DECLARE_PER_CPU(unsigned long, boot_pc);

/* Set up the initial user PC and stack pointer for a newly-executed thread. */
static inline void start_thread(struct pt_regs *regs,
				unsigned long pc, unsigned long usp)
{
	regs->pc = pc;
	regs->sp = usp;
}

/* Free any architecture-specific resources held by an exiting thread. */
static inline void release_thread(struct task_struct *dead_task)
{
	/* Nothing to do on this architecture. */
}

/* Prepare to copy thread state; a no-op here. */
#define prepare_to_copy(tsk)	do { } while (0)

extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern int do_work_pending(struct pt_regs *regs, u32 flags);

/* Return the saved kernel PC of a blocked thread. */
#define thread_saved_pc(t)	((t)->thread.pc)

unsigned long get_wchan(struct task_struct *p);

/* Return the initial kernel stack-top ("ksp0") value for the given task. */
#define task_ksp0(task)	((unsigned long)(task)->stack + THREAD_SIZE)

/* Locate the user-visible register state of the given task. */
#define KSTK_TOP(task)	(task_ksp0(task) - STACK_TOP_DELTA)
#define task_pt_regs(task) \
	((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
#define task_sp(task)	(task_pt_regs(task)->sp)
#define task_pc(task)	(task_pt_regs(task)->pc)
/* Aliases for the user pc and sp, used by generic code such as /proc. */
#define KSTK_EIP(task)	task_pc(task)
#define KSTK_ESP(task)	task_sp(task)

/* Standard format for printing registers and other word-size data. */
#ifdef __tilegx__
# define REGFMT "0x%016lx"
#else
# define REGFMT "0x%08lx"
#endif
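
/*
 * Illustrative use only (not part of this header): REGFMT matches the
 * width of an "unsigned long" register dump on either chip, e.g.:
 *
 *	pr_err("pc " REGFMT " sp " REGFMT "\n", regs->pc, regs->sp);
 */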

/*
 * Pause briefly by reading a slow SPR; barrier() also makes this a
 * compiler barrier, so memory is re-read on each loop iteration.
 */
static inline void cpu_relax(void)
{
	__insn_mfspr(SPR_PASS);
	barrier();
}
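
/*
 * Typical (illustrative) busy-wait usage:
 *
 *	while (!flag)
 *		cpu_relax();
 */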

/* Info on this processor, exported via /proc/cpuinfo. */
struct seq_operations;
extern const struct seq_operations cpuinfo_op;

/* Name of the chip model this kernel is running on. */
extern char chip_model[64];

/* Which memory controller serves each NUMA node. */
extern int node_controller[];

#if CHIP_HAS_CBOX_HOME_MAP()
/* Does the heap allocator return hash-for-home pages by default? */
extern int hash_default;

/* Should kernel stack pages be hash-for-home? */
extern int kstack_hash;

/* Does anonymous user memory get hash-for-home pages by default? */
#define uheap_hash hash_default

#else
#define hash_default 0
#define kstack_hash 0
#define uheap_hash 0
#endif

/* Are we using huge pages in the TLB for kernel data? */
extern int kdata_huge;

/* Support standard Linux prefetching. */
#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch(x)
#define PREFETCH_STRIDE CHIP_L2_LINE_SIZE()
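
/*
 * Illustrative use of prefetch() (not specific to this header), e.g. in
 * a list traversal to warm the cache one element ahead:
 *
 *	prefetch(p->next);
 */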

/* Bring a value into the L1 data cache, faulting the TLB if necessary. */
#ifdef __tilegx__
#define prefetch_L1(x) __insn_prefetch_l1_fault((void *)(x))
#else
#define prefetch_L1(x) __insn_prefetch_L1((void *)(x))
#endif

#else /* __ASSEMBLY__ */

/* Assembly-code equivalent of cpu_relax(): read the slow SPR_PASS SPR. */
#define CPU_RELAX	mfspr zero, SPR_PASS

#endif /* __ASSEMBLY__ */

/* Assembly code assumes that the PL field is in the low bits of EX_CONTEXT. */
#if SPR_EX_CONTEXT_1_1__PL_SHIFT != 0
# error Fix assembly assumptions about PL
#endif

/* The EX1 macros below are sometimes used for EX_CONTEXT_0_1 as well. */
#if SPR_EX_CONTEXT_1_1__PL_SHIFT != SPR_EX_CONTEXT_0_1__PL_SHIFT || \
    SPR_EX_CONTEXT_1_1__PL_RMASK != SPR_EX_CONTEXT_0_1__PL_RMASK || \
    SPR_EX_CONTEXT_1_1__ICS_SHIFT != SPR_EX_CONTEXT_0_1__ICS_SHIFT || \
    SPR_EX_CONTEXT_1_1__ICS_RMASK != SPR_EX_CONTEXT_0_1__ICS_RMASK
# error Fix assumptions that EX1 macros work for both PL0 and PL1
#endif

/* Pull apart and recombine the PL and ICS bits of an EX_CONTEXT_x_1 value. */
#define EX1_PL(ex1) \
	(((ex1) >> SPR_EX_CONTEXT_1_1__PL_SHIFT) & SPR_EX_CONTEXT_1_1__PL_RMASK)
#define EX1_ICS(ex1) \
	(((ex1) >> SPR_EX_CONTEXT_1_1__ICS_SHIFT) & SPR_EX_CONTEXT_1_1__ICS_RMASK)
#define PL_ICS_EX1(pl, ics) \
	(((pl) << SPR_EX_CONTEXT_1_1__PL_SHIFT) | \
	 ((ics) << SPR_EX_CONTEXT_1_1__ICS_SHIFT))
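
/*
 * Illustrative only: PL_ICS_EX1(USER_PL, 0) builds an EX1 value for a
 * return to user space with the interrupt-critical-section bit clear,
 * and EX1_PL()/EX1_ICS() recover the two fields from a saved value.
 */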

/* Symbolic constants for the protection levels (PLs). */
#define USER_PL 0
#if CONFIG_KERNEL_PL == 2
#define GUEST_PL 1
#endif
#define KERNEL_PL CONFIG_KERNEL_PL

/* SYSTEM_SAVE_K_0 holds the current cpu number ORed into ksp0. */
#define CPU_LOG_MASK_VALUE 12
#define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)
#if CONFIG_NR_CPUS > CPU_MASK_VALUE
# error Too many cpus!
#endif
#define raw_smp_processor_id() \
	((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & CPU_MASK_VALUE)
#define get_current_ksp0() \
	(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~CPU_MASK_VALUE)
#define next_current_ksp0(task) ({ \
	unsigned long __ksp0 = task_ksp0(task); \
	int __cpu = raw_smp_processor_id(); \
	BUG_ON(__ksp0 & CPU_MASK_VALUE); \
	__ksp0 | __cpu; \
})
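
/*
 * Illustrative only (the real code lives in the arch context-switch path):
 * a switch to task "next" would write the packed value back, e.g.
 *
 *	__insn_mtspr(SPR_SYSTEM_SAVE_K_0, next_current_ksp0(next));
 *
 * so that raw_smp_processor_id() and get_current_ksp0() above continue
 * to decode the cpu number and ksp0 correctly.
 */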

#endif /* _ASM_TILE_PROCESSOR_H */