1
2
3
4
5
6
7
8#ifndef __ASM_AVR32_PROCESSOR_H
9#define __ASM_AVR32_PROCESSOR_H
10
11#include <asm/page.h>
12#include <asm/cache.h>
13
/* User virtual address space spans the low 2 GiB. */
#define TASK_SIZE 0x80000000

#ifdef __KERNEL__
/* User stacks start at the very top of the user address space. */
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
#endif
20
21#ifndef __ASSEMBLY__
22
23static inline void *current_text_addr(void)
24{
25 register void *pc asm("pc");
26 return pc;
27}
28
/* AVR32 architecture variant implemented by the CPU. */
enum arch_type {
	ARCH_AVR32A,
	ARCH_AVR32B,
	ARCH_MAX	/* number of known variants; keep last */
};
34
/* Known AVR32 CPU cores. */
enum cpu_type {
	CPU_MORGAN,
	CPU_AT32AP,
	CPU_MAX		/* number of known cores; keep last */
};
40
/* TLB hardware configuration of the CPU. */
enum tlb_config {
	TLB_NONE,	/* no TLB */
	TLB_SPLIT,	/* separate instruction and data TLBs */
	TLB_UNIFIED,	/* one TLB shared by instructions and data */
	TLB_INVALID	/* unrecognized configuration */
};
47
/*
 * Optional-hardware feature bits for avr32_cpuinfo.features.
 */
#define AVR32_FEATURE_RMW (1 << 0)	/* atomic read-modify-write insns */
#define AVR32_FEATURE_DSP (1 << 1)	/* DSP instruction extension */
#define AVR32_FEATURE_SIMD (1 << 2)	/* SIMD instruction extension */
#define AVR32_FEATURE_OCD (1 << 3)	/* on-chip debug support */
#define AVR32_FEATURE_PCTR (1 << 4)	/* performance counters */
#define AVR32_FEATURE_JAVA (1 << 5)	/* Java hardware acceleration */
#define AVR32_FEATURE_FPU (1 << 6)	/* floating-point unit */
55
/*
 * Identification and capability data for one CPU, filled in at boot.
 */
struct avr32_cpuinfo {
	struct clk *clk;			/* the CPU's clock */
	unsigned long loops_per_jiffy;		/* delay-loop calibration */
	enum arch_type arch_type;
	enum cpu_type cpu_type;
	unsigned short arch_revision;
	unsigned short cpu_revision;
	enum tlb_config tlb_config;
	unsigned long features;			/* AVR32_FEATURE_* bitmask */
	u32 device_id;				/* decoded by avr32_get_*() below */

	struct cache_info icache;		/* instruction-cache geometry */
	struct cache_info dcache;		/* data-cache geometry */
};
70
71static inline unsigned int avr32_get_manufacturer_id(struct avr32_cpuinfo *cpu)
72{
73 return (cpu->device_id >> 1) & 0x7f;
74}
75static inline unsigned int avr32_get_product_number(struct avr32_cpuinfo *cpu)
76{
77 return (cpu->device_id >> 12) & 0xffff;
78}
79static inline unsigned int avr32_get_chip_revision(struct avr32_cpuinfo *cpu)
80{
81 return (cpu->device_id >> 28) & 0x0f;
82}
83
/* Data for the boot CPU, populated during early setup. */
extern struct avr32_cpuinfo boot_cpu_data;

/* Only one CPU's data is kept here, so "current" is always the boot CPU. */
#define current_cpu_data boot_cpu_data

/* Default base for mmap() allocations: one third into the address space. */
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))

#define cpu_relax() barrier()
#define cpu_relax_lowlatency() cpu_relax()
/* Branch to the next instruction to force a pipeline flush. */
#define cpu_sync_pipeline() asm volatile("sub pc, -2" : : : "memory")
97
/*
 * Register state preserved across a context switch: status register,
 * program counter, kernel stack pointer and r0-r7.
 * NOTE(review): presumably r0-r7 are the callee-saved registers of the
 * AVR32 ABI and the rest are spilled by the caller -- confirm against
 * the switch_to() implementation.
 */
struct cpu_context {
	unsigned long sr;	/* status register */
	unsigned long pc;	/* program counter */
	unsigned long ksp;	/* kernel stack pointer */
	unsigned long r7;
	unsigned long r6;
	unsigned long r5;
	unsigned long r4;
	unsigned long r3;
	unsigned long r2;
	unsigned long r1;
	unsigned long r0;
};
111
112
/* Architecture-specific per-thread state. */
struct thread_struct {
	struct cpu_context cpu_context;		/* saved registers */
	/*
	 * NOTE(review): these look like the address and original opcode of
	 * an instruction patched for single-stepping -- confirm against the
	 * ptrace/debug code.
	 */
	unsigned long single_step_addr;
	u16 single_step_insn;
};
118
/*
 * Initial thread state for the init task: the kernel stack pointer
 * starts at the top of init_stack (stacks grow downward).
 */
#define INIT_THREAD { \
	.cpu_context = { \
		.ksp = sizeof(init_stack) + (long)&init_stack, \
	}, \
}
124
125
126
127
/*
 * Initialize user register state for a freshly exec'd thread: zero all
 * registers, select user mode, and install the new program counter and
 * stack pointer.  Bit 0 of the PC is forced clear (instructions are
 * halfword aligned).  All macro arguments are parenthesized so that
 * expression arguments expand safely.
 */
#define start_thread(regs, new_pc, new_sp)	 \
	do {					 \
		memset((regs), 0, sizeof(*(regs))); \
		(regs)->sr = MODE_USER;		 \
		(regs)->pc = (new_pc) & ~1;	 \
		(regs)->sp = (new_sp);		 \
	} while (0)
135
struct task_struct;

/* Release architecture-specific resources of an exiting task. */
extern void release_thread(struct task_struct *);

/* PC at which a switched-out task will resume. */
#define thread_saved_pc(tsk) ((tsk)->thread.cpu_context.pc)

struct pt_regs;
extern unsigned long get_wchan(struct task_struct *p);
extern void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl);
extern void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp,
			       struct pt_regs *regs, const char *log_lvl);

/* User-mode registers live at the very top of the task's kernel stack. */
#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk) ((tsk)->thread.cpu_context.pc)
#define KSTK_ESP(tsk) ((tsk)->thread.cpu_context.ksp)
155
/* This architecture provides its own prefetch() implementation. */
#define ARCH_HAS_PREFETCH

/* Hint the CPU to pull the cache line containing *x into the data cache. */
static inline void prefetch(const void *x)
{
	const char *c = x;
	asm volatile("pref %0" : : "r"(c));
}
#define PREFETCH_STRIDE L1_CACHE_BYTES
164
165#endif
166
167#endif
168