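/*
 * General management API for the SH-4 integrated Store Queues.
 *
 * The store queues are a pair of 32-byte write buffers mapped into the
 * 64MB P4 region at P4SEG_STORE_QUE; data written there is burst out to
 * the physical target selected by the mapping, which makes them useful
 * for streaming data to devices and framebuffers without going through
 * the cache.
 */
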
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/sysdev.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/err.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <cpu/sq.h>

struct sq_mapping;

struct sq_mapping {
	const char *name;	/* owner name, logged and shown via sysfs */

	unsigned long sq_addr;	/* store queue (P4) virtual address */
	unsigned long addr;	/* physical address being mapped */
	unsigned int size;	/* length of the mapping, in bytes */

	struct sq_mapping *next;
};

static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
static struct kmem_cache *sq_cache;
static unsigned long *sq_bitmap;	/* one bit per page of SQ space */

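/*
 * Completion barrier for outstanding store queue transfers: a dummy read
 * from the SQ area followed by a pair of writes back into it, used after
 * a run of prefetches (see sq_flush_range()) to wait for the bursts to
 * be pushed out.
 */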
#define store_queue_barrier()			\
do {						\
	(void)ctrl_inl(P4SEG_STORE_QUE);	\
	ctrl_outl(0, P4SEG_STORE_QUE + 0);	\
	ctrl_outl(0, P4SEG_STORE_QUE + 8);	\
} while (0)
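/**
 * sq_flush_range - Flush (prefetch) a specific SQ range
 * @start: the store queue address to start flushing from
 * @len: the length to flush
 *
 * Flushes the store queue cache from @start to @start + @len in a
 * linear fashion.
 */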
void sq_flush_range(unsigned long start, unsigned int len)
{
	unsigned long *sq = (unsigned long *)start;

	/* Flush the queues, 32 bytes at a time */
	for (len >>= 5; len--; sq += 8)
		prefetchw(sq);

	/* Wait for completion */
	store_queue_barrier();
}
EXPORT_SYMBOL(sq_flush_range);

static inline void sq_mapping_list_add(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	p = &sq_mapping_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	map->next = tmp;
	*p = map;

	spin_unlock_irq(&sq_mapping_lock);
}

static inline void sq_mapping_list_del(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
		if (tmp == map) {
			*p = tmp->next;
			break;
		}

	spin_unlock_irq(&sq_mapping_lock);
}

static int __sq_remap(struct sq_mapping *map, unsigned long flags)
{
#if defined(CONFIG_MMU)
	struct vm_struct *vma;

	vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
	if (!vma)
		return -ENOMEM;

	vma->phys_addr = map->addr;

	if (ioremap_page_range((unsigned long)vma->addr,
			       (unsigned long)vma->addr + map->size,
			       vma->phys_addr, __pgprot(flags))) {
		vunmap(vma->addr);
		return -EAGAIN;
	}
#else
	/*
	 * Without an MMU (or with it disabled), this is much more
	 * straightforward: point both QACR area registers at the upper
	 * physical address bits (bits [28:26]) of the target directly.
	 */
	ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
	ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
#endif

	return 0;
}
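/**
 * sq_remap - Map a physical address through the Store Queues
 * @phys: Physical address of mapping.
 * @size: Length of mapping.
 * @name: User invoking mapping.
 * @flags: Protection flags.
 *
 * Remaps the physical address @phys through the next available store queue
 * address of @size length. @name is logged at boot time as well as through
 * the sysfs interface.
 */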
unsigned long sq_remap(unsigned long phys, unsigned int size,
		       const char *name, unsigned long flags)
{
	struct sq_mapping *map;
	unsigned long end;
	unsigned int psz;
	int ret, page;

	/* Don't allow wraparound or zero size */
	end = phys + size - 1;
	if (unlikely(!size || end < phys))
		return -EINVAL;

	/* Don't allow anyone to remap normal system RAM */
	if (unlikely(phys < virt_to_phys(high_memory)))
		return -EINVAL;

	phys &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - phys;

	map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
	if (unlikely(!map))
		return -ENOMEM;

	map->addr = phys;
	map->size = size;
	map->name = name;

	page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
				       get_order(map->size));
	if (unlikely(page < 0)) {
		ret = -ENOSPC;
		goto out;
	}

	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);

	ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
	if (unlikely(ret != 0))
		goto out;

	psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	pr_info("sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n",
		likely(map->name) ? map->name : "???",
		psz, psz == 1 ? " " : "s",
		map->sq_addr, map->addr);

	sq_mapping_list_add(map);

	return map->sq_addr;

out:
	kmem_cache_free(sq_cache, map);
	return ret;
}
EXPORT_SYMBOL(sq_remap);
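
/*
 * Example (hypothetical caller): map a device buffer at buf_phys, write
 * 32-byte chunks through the returned store queue address, then flush
 * and tear the mapping down again:
 *
 *	unsigned long sq;
 *
 *	sq = sq_remap(buf_phys, buf_len, "mydriver", pgprot_val(PAGE_SHARED));
 *	if (IS_ERR_VALUE(sq))
 *		return sq;
 *
 *	... copy data to sq in 32-byte units ...
 *
 *	sq_flush_range(sq, buf_len);
 *	sq_unmap(sq);
 *
 * buf_phys, buf_len and "mydriver" are placeholders for the caller's own
 * buffer and name.
 */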
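/**
 * sq_unmap - Unmap a Store Queue allocation
 * @vaddr: Store queue address returned by a previous sq_remap().
 *
 * Releases the store queue space and, in the MMU case, the VMA that were
 * set up by sq_remap(), then frees the mapping descriptor.
 */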
void sq_unmap(unsigned long vaddr)
{
	struct sq_mapping **p, *map;
	int page;

	for (p = &sq_mapping_list; (map = *p); p = &map->next)
		if (map->sq_addr == vaddr)
			break;

	if (unlikely(!map)) {
		printk(KERN_ERR "%s: bad store queue address 0x%08lx\n",
		       __func__, vaddr);
		return;
	}

	page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
	bitmap_release_region(sq_bitmap, page, get_order(map->size));

#ifdef CONFIG_MMU
	{
		/*
		 * Tear down the VMA that __sq_remap() set up for this
		 * mapping, along with the page table entries backing it.
		 */
		struct vm_struct *vma;

		vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
		if (!vma) {
			printk(KERN_ERR "%s: bad address 0x%08lx\n",
			       __func__, map->sq_addr);
			return;
		}
	}
#endif

	sq_mapping_list_del(map);

	kmem_cache_free(sq_cache, map);
}
EXPORT_SYMBOL(sq_unmap);
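/*
 * Per-CPU sysfs interface. Each CPU gets an "sq" kobject with a single
 * "mapping" attribute: reading it lists the current store queue mappings,
 * writing "<phys> <len>" creates a new one, and writing "<sq_addr> 0"
 * removes an existing one. Assuming the standard cpu sysdev layout, this
 * shows up as /sys/devices/system/cpu/cpuN/sq/mapping.
 */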
static struct kobject *sq_kobject[NR_CPUS];

struct sq_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(char *buf);
	ssize_t (*store)(const char *buf, size_t count);
};

#define to_sq_sysfs_attr(a) container_of(a, struct sq_sysfs_attr, attr)

static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->show))
		return sattr->show(buf);

	return -EIO;
}

static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->store))
		return sattr->store(buf, count);

	return -EIO;
}

static ssize_t mapping_show(char *buf)
{
	struct sq_mapping **list, *entry;
	char *p = buf;

	for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
		p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
			     entry->sq_addr, entry->sq_addr + entry->size,
			     entry->addr, entry->name);

	return p - buf;
}

static ssize_t mapping_store(const char *buf, size_t count)
{
	unsigned long base = 0, len = 0;

	sscanf(buf, "%lx %lx", &base, &len);
	if (!base)
		return -EIO;

	if (likely(len)) {
		unsigned long ret = sq_remap(base, len, "Userspace",
					     pgprot_val(PAGE_SHARED));
		if (IS_ERR_VALUE(ret))
			return ret;
	} else
		sq_unmap(base);

	return count;
}

static struct sq_sysfs_attr mapping_attr =
	__ATTR(mapping, 0644, mapping_show, mapping_store);

static struct attribute *sq_sysfs_attrs[] = {
	&mapping_attr.attr,
	NULL,
};

static struct sysfs_ops sq_sysfs_ops = {
	.show	= sq_sysfs_show,
	.store	= sq_sysfs_store,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sq_sysfs_ops,
	.default_attrs	= sq_sysfs_attrs,
};

static int __devinit sq_sysdev_add(struct sys_device *sysdev)
{
	unsigned int cpu = sysdev->id;
	struct kobject *kobj;
	int error;

	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(!sq_kobject[cpu]))
		return -ENOMEM;

	kobj = sq_kobject[cpu];
	error = kobject_init_and_add(kobj, &ktype_percpu_entry, &sysdev->kobj,
				     "%s", "sq");
	if (!error)
		kobject_uevent(kobj, KOBJ_ADD);
	return error;
}

static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
{
	unsigned int cpu = sysdev->id;
	struct kobject *kobj = sq_kobject[cpu];

	kobject_put(kobj);
	return 0;
}

static struct sysdev_driver sq_sysdev_driver = {
	.add		= sq_sysdev_add,
	.remove		= __devexit_p(sq_sysdev_remove),
};

static int __init sq_api_init(void)
{
	unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
	unsigned int size = (nr_pages + (BITS_PER_LONG - 1)) / BITS_PER_LONG;
	int ret = -ENOMEM;

	printk(KERN_NOTICE "sq: Registering store queue API.\n");

	sq_cache = kmem_cache_create("store_queue_cache",
				     sizeof(struct sq_mapping), 0, 0, NULL);
	if (unlikely(!sq_cache))
		return ret;

	/* One bit per page of store queue space; size is in longs. */
	sq_bitmap = kcalloc(size, sizeof(long), GFP_KERNEL);
	if (unlikely(!sq_bitmap))
		goto out;

	ret = sysdev_driver_register(&cpu_sysdev_class, &sq_sysdev_driver);
	if (unlikely(ret != 0))
		goto out;

	return 0;

out:
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);

	return ret;
}

static void __exit sq_api_exit(void)
{
	sysdev_driver_unregister(&cpu_sysdev_class, &sq_sysdev_driver);
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);
}

module_init(sq_api_init);
module_exit(sq_api_exit);

MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");