/*
 * Virtual Processor Dispatch Trace Log
 *
 * (C) Copyright IBM Corporation 2009
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <asm/uaccess.h>
#include <asm/firmware.h>
#include <asm/lppaca.h>
#include <asm/debug.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>

struct dtl {
	struct dtl_entry	*buf;		/* per-cpu log buffer */
	struct dentry		*file;		/* debugfs file for this cpu */
	int			cpu;
	int			buf_entries;	/* capacity of buf, in entries */
	u64			last_idx;	/* next index to hand to userspace */
	spinlock_t		lock;
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);

/*
 * Dispatch trace log event enable mask:
 *   0x1: voluntary virtual processor waits
 *   0x2: time-slice preempts
 *   0x4: virtual partition memory page faults
 */
static u8 dtl_event_mask = 0x7;
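
/*
 * The mask is runtime-tunable through the "dtl_event_mask" debugfs file
 * created in dtl_init() below, e.g. (a sketch, assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	echo 0x1 > /sys/kernel/debug/powerpc/dtl/dtl_event_mask
 *
 * which limits logs opened afterwards to voluntary-wait events only.
 */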

/*
 * Size of per-cpu log buffers. Firmware requires that the buffer does
 * not cross a 4k boundary; N_DISPATCH_LOG entries fill exactly one
 * DISPATCH_LOG_BYTES (4k) log.
 */
static u32 dtl_buf_entries = N_DISPATCH_LOG;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
struct dtl_ring {
	u64			write_index;
	struct dtl_entry	*write_ptr;
	struct dtl_entry	*buf;
	struct dtl_entry	*buf_end;
	u8			saved_dtl_mask;
};

static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);

static atomic_t dtl_count;

/*
 * The cpu accounting code controls the DTL ring buffer, and we get
 * given entries as they are processed.
 */
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
	struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
	struct dtl_entry *wp = dtlr->write_ptr;
	struct lppaca *vpa = local_paca->lppaca_ptr;

	if (!wp)
		return;

	*wp = *dtle;
	barrier();

	/* check for hypervisor ring buffer overflow, ignore this entry if so */
	if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
		return;

	++wp;
	if (wp == dtlr->buf_end)
		wp = dtlr->buf;
	dtlr->write_ptr = wp;

	/* incrementing write_index makes the new entry visible */
	smp_wmb();
	++dtlr->write_index;
}
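
/*
 * Visibility sketch for the ring above (illustrative, not kernel API):
 * the entry body is published before write_index is advanced, and
 * smp_wmb() orders the two stores. A reader pairing with this would
 * sample write_index and then issue a read barrier before touching the
 * copied entries, e.g.:
 *
 *	idx = dtlr->write_index;
 *	smp_rmb();
 *	(entries at indices < idx are now stable to copy)
 *
 * In this file the reader is dtl_file_read(), which samples the index
 * through dtl_current_index() under dtl->lock.
 */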

static int dtl_start(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->buf = dtl->buf;
	dtlr->buf_end = dtl->buf + dtl->buf_entries;
	dtlr->write_index = 0;

	/* setting write_ptr enables logging into our buffer */
	smp_wmb();
	dtlr->write_ptr = dtl->buf;

	/* enable event logging */
	dtlr->saved_dtl_mask = lppaca_of(dtl->cpu).dtl_enable_mask;
	lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

	dtl_consumer = consume_dtle;
	atomic_inc(&dtl_count);
	return 0;
}

static void dtl_stop(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->write_ptr = NULL;
	smp_wmb();

	dtlr->buf = NULL;

	/* restore dtl_enable_mask */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtlr->saved_dtl_mask;

	if (atomic_dec_and_test(&dtl_count))
		dtl_consumer = NULL;
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return per_cpu(dtl_rings, dtl->cpu).write_index;
}

#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static int dtl_start(struct dtl *dtl)
{
	unsigned long addr;
	int ret, hwcpu;

	/* Register our dtl buffer with the hypervisor. The HV expects the
	 * buffer size to be passed in the second word of the buffer */
	((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);

	hwcpu = get_hard_smp_processor_id(dtl->cpu);
	addr = __pa(dtl->buf);
	ret = register_dtl(hwcpu, addr);
	if (ret) {
		printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
		       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
		return -EIO;
	}

	/* set our initial buffer indices */
	lppaca_of(dtl->cpu).dtl_idx = 0;

	/* ensure that our updates to the lppaca fields have occurred before
	 * we actually enable the logging */
	smp_wmb();

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

	return 0;
}

static void dtl_stop(struct dtl *dtl)
{
	int hwcpu = get_hard_smp_processor_id(dtl->cpu);

	lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

	unregister_dtl(hwcpu);
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
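
/*
 * Both halves of the #ifdef above implement the same small interface:
 * dtl_start(), dtl_stop() and dtl_current_index(). With
 * CONFIG_VIRT_CPU_ACCOUNTING_NATIVE the accounting code already owns
 * the hypervisor-facing buffer and hands us entries through the
 * dtl_consumer hook; without it, we register our per-cpu buffer with
 * the hypervisor via register_dtl() and read the index directly from
 * the lppaca.
 */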

static int dtl_enable(struct dtl *dtl)
{
	long int n_entries;
	long int rc;
	struct dtl_entry *buf = NULL;

	if (!dtl_cache)
		return -ENOMEM;

	/* only allow one reader */
	if (dtl->buf)
		return -EBUSY;

	n_entries = dtl_buf_entries;
	buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
	if (!buf) {
		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
		       __func__, dtl->cpu);
		return -ENOMEM;
	}

	spin_lock(&dtl->lock);
	rc = -EBUSY;
	if (!dtl->buf) {
		/* store the original allocation size for use during read */
		dtl->buf_entries = n_entries;
		dtl->buf = buf;
		dtl->last_idx = 0;
		rc = dtl_start(dtl);
		if (rc)
			dtl->buf = NULL;
	}
	spin_unlock(&dtl->lock);

	if (rc)
		kmem_cache_free(dtl_cache, buf);
	return rc;
}
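
/*
 * Design notes (derived from the code above): the GFP_KERNEL allocation
 * happens before dtl->lock is taken so we never sleep under the
 * spinlock, and the !dtl->buf re-check under the lock is what enforces
 * the one-reader-per-cpu rule when two openers race. dtl_cache itself
 * is created elsewhere during pseries setup; if that failed, every
 * open attempt fails early with -ENOMEM.
 */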

static void dtl_disable(struct dtl *dtl)
{
	spin_lock(&dtl->lock);
	dtl_stop(dtl);
	kmem_cache_free(dtl_cache, dtl->buf);
	dtl->buf = NULL;
	dtl->buf_entries = 0;
	spin_unlock(&dtl->lock);
}

/* file interface */

static int dtl_file_open(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	int rc;

	rc = dtl_enable(dtl);
	if (rc)
		return rc;

	filp->private_data = dtl;
	return 0;
}

static int dtl_file_release(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	dtl_disable(dtl);
	return 0;
}

static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
		loff_t *pos)
{
	long int rc, n_read, n_req, read_size;
	struct dtl *dtl;
	u64 cur_idx, last_idx, i;

	if ((len % sizeof(struct dtl_entry)) != 0)
		return -EINVAL;

	dtl = filp->private_data;

	/* requested number of entries to read */
	n_req = len / sizeof(struct dtl_entry);

	/* actual number of entries read */
	n_read = 0;

	spin_lock(&dtl->lock);

	cur_idx = dtl_current_index(dtl);
	last_idx = dtl->last_idx;

	if (last_idx + dtl->buf_entries <= cur_idx)
		last_idx = cur_idx - dtl->buf_entries + 1;

	if (last_idx + n_req > cur_idx)
		n_req = cur_idx - last_idx;

	if (n_req > 0)
		dtl->last_idx = last_idx + n_req;

	spin_unlock(&dtl->lock);

	if (n_req <= 0)
		return 0;

	i = last_idx % dtl->buf_entries;

	/* read the tail of the buffer if we've wrapped */
	if (i + n_req > dtl->buf_entries) {
		read_size = dtl->buf_entries - i;

		rc = copy_to_user(buf, &dtl->buf[i],
				read_size * sizeof(struct dtl_entry));
		if (rc)
			return -EFAULT;

		i = 0;
		n_req -= read_size;
		n_read += read_size;
		buf += read_size * sizeof(struct dtl_entry);
	}

	/* ... and now the head of the buffer */
	rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
	if (rc)
		return -EFAULT;

	n_read += n_req;

	return n_read * sizeof(struct dtl_entry);
}
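
/*
 * Worked example of the wrap handling above, with illustrative numbers:
 * for buf_entries = 8, last_idx = 6 and cur_idx = 10, the window
 * [6, 10) starts at slot 6 % 8 = 6. The first copy_to_user() drains
 * slots 6-7 (the tail), i resets to 0, and the second copy drains
 * slots 0-1 (the head), so the call returns
 * 4 * sizeof(struct dtl_entry) bytes.
 */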

static const struct file_operations dtl_fops = {
	.open		= dtl_file_open,
	.release	= dtl_file_release,
	.read		= dtl_file_read,
	.llseek		= no_llseek,
};
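
/*
 * Example consumer (a sketch; assumes debugfs is mounted at
 * /sys/kernel/debug and that the reader knows the struct dtl_entry
 * layout from asm/lppaca.h):
 *
 *	int fd = open("/sys/kernel/debug/powerpc/dtl/cpu-0", O_RDONLY);
 *	struct dtl_entry ents[32];
 *	ssize_t n = read(fd, ents, sizeof(ents));
 *
 * Opening the file allocates and registers the log (dtl_file_open() ->
 * dtl_enable()); read lengths must be a multiple of
 * sizeof(struct dtl_entry) or -EINVAL is returned; closing the fd
 * tears the log down again via dtl_file_release().
 */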

static struct dentry *dtl_dir;

static int dtl_setup_file(struct dtl *dtl)
{
	char name[10];

	sprintf(name, "cpu-%d", dtl->cpu);

	dtl->file = debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
	if (!dtl->file)
		return -ENOMEM;

	return 0;
}

static int dtl_init(void)
{
	struct dentry *event_mask_file, *buf_entries_file;
	int rc, i;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	/* set up common debugfs structure */

	rc = -ENOMEM;
	dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root);
	if (!dtl_dir) {
		printk(KERN_WARNING "%s: can't create dtl root dir\n",
		       __func__);
		goto err;
	}

	event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
				dtl_dir, &dtl_event_mask);
	buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
				dtl_dir, &dtl_buf_entries);

	if (!event_mask_file || !buf_entries_file) {
		printk(KERN_WARNING "%s: can't create dtl files\n", __func__);
		goto err_remove_dir;
	}

	/* set up the per-cpu log structures */
	for_each_possible_cpu(i) {
		struct dtl *dtl = &per_cpu(cpu_dtl, i);
		spin_lock_init(&dtl->lock);
		dtl->cpu = i;

		rc = dtl_setup_file(dtl);
		if (rc)
			goto err_remove_dir;
	}

	return 0;

err_remove_dir:
	debugfs_remove_recursive(dtl_dir);
err:
	return rc;
}
machine_arch_initcall(pseries, dtl_init);