/*
 * Virtual Processor Dispatch Trace Log
 *
 * (C) Copyright IBM Corporation 2009
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <asm/lppaca.h>
#include <asm/debugfs.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>

struct dtl {
	struct dtl_entry	*buf;
	struct dentry		*file;
	int			cpu;
	int			buf_entries;
	u64			last_idx;
	spinlock_t		lock;
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);

/*
 * Dispatch trace log event mask:
 * 0x7: 0x1: voluntary virtual processor waits
 *      0x2: time-slice preempts
 *      0x4: virtual partition memory page faults
 */
static u8 dtl_event_mask = 0x7;


/*
 * Size of per-cpu log buffers. Firmware requires that the buffer does
 * not cross a 4k boundary.
 */
static int dtl_buf_entries = N_DISPATCH_LOG;
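
/*
 * N_DISPATCH_LOG and DISPATCH_LOG_BYTES are defined in asm/lppaca.h:
 * the log shared with the hypervisor is a 4k (DISPATCH_LOG_BYTES)
 * region, so one buffer holds DISPATCH_LOG_BYTES /
 * sizeof(struct dtl_entry) entries.
 */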

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
struct dtl_ring {
	u64	write_index;
	struct dtl_entry *write_ptr;
	struct dtl_entry *buf;
	struct dtl_entry *buf_end;
	u8	saved_dtl_mask;
};

static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);

static atomic_t dtl_count;

/*
 * The cpu accounting code controls the DTL ring buffer, and we get
 * given entries as they are processed.
 */
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
	struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
	struct dtl_entry *wp = dtlr->write_ptr;
	struct lppaca *vpa = local_paca->lppaca_ptr;

	if (!wp)
		return;

	*wp = *dtle;
	barrier();

	/* check for hypervisor ring buffer overflow, ignore this entry if so */
	if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
		return;

	++wp;
	if (wp == dtlr->buf_end)
		wp = dtlr->buf;
	dtlr->write_ptr = wp;

	/* incrementing write_index makes the new entry visible */
	smp_wmb();
	++dtlr->write_index;
}
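
/*
 * Note on ordering (describing the code above): write_ptr doubles as
 * the enable flag checked by consume_dtle(), and write_index is only
 * advanced after the entry has been copied in, with an smp_wmb() in
 * between, so a reader that snapshots write_index only ever sees
 * completely written entries.
 */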

static int dtl_start(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->buf = dtl->buf;
	dtlr->buf_end = dtl->buf + dtl->buf_entries;
	dtlr->write_index = 0;

	/* setting write_ptr enables logging into our buffer */
	smp_wmb();
	dtlr->write_ptr = dtl->buf;

	/* enable event logging */
	dtlr->saved_dtl_mask = lppaca_of(dtl->cpu).dtl_enable_mask;
	lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

	dtl_consumer = consume_dtle;
	atomic_inc(&dtl_count);
	return 0;
}

static void dtl_stop(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->write_ptr = NULL;
	smp_wmb();

	dtlr->buf = NULL;

	/* restore dtl_enable_mask */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtlr->saved_dtl_mask;

	if (atomic_dec_and_test(&dtl_count))
		dtl_consumer = NULL;
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return per_cpu(dtl_rings, dtl->cpu).write_index;
}

#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static int dtl_start(struct dtl *dtl)
{
	unsigned long addr;
	int ret, hwcpu;

	/* Register our dtl buffer with the hypervisor. The HV expects the
	 * buffer size to be passed in the second word of the buffer */
	((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);

	hwcpu = get_hard_smp_processor_id(dtl->cpu);
	addr = __pa(dtl->buf);
	ret = register_dtl(hwcpu, addr);
	if (ret) {
		printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
		       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
		return -EIO;
	}

	/* set our initial buffer indices */
	lppaca_of(dtl->cpu).dtl_idx = 0;

	/* ensure that our updates to the lppaca fields have occurred before
	 * we actually enable the logging */
	smp_wmb();

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

	return 0;
}
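
/*
 * register_dtl() and unregister_dtl() come from asm/plpar_wrappers.h;
 * they are thin wrappers around the H_REGISTER_VPA hypervisor call,
 * using its dispatch trace log subfunctions to attach and detach our
 * buffer.
 */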

static void dtl_stop(struct dtl *dtl)
{
	int hwcpu = get_hard_smp_processor_id(dtl->cpu);

	lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

	unregister_dtl(hwcpu);
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static int dtl_enable(struct dtl *dtl)
{
	long int n_entries;
	long int rc;
	struct dtl_entry *buf = NULL;

	/* dtl_cache is created at boot by the platform setup code; it is
	 * NULL if that allocation failed */
	if (!dtl_cache)
		return -ENOMEM;

	/* only allow one reader */
	if (dtl->buf)
		return -EBUSY;

	n_entries = dtl_buf_entries;
	buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
	if (!buf) {
		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
		       __func__, dtl->cpu);
		return -ENOMEM;
	}

	spin_lock(&dtl->lock);
	rc = -EBUSY;
	if (!dtl->buf) {
		/* store the original allocation size for use during read */
		dtl->buf_entries = n_entries;
		dtl->buf = buf;
		dtl->last_idx = 0;
		rc = dtl_start(dtl);
		if (rc)
			dtl->buf = NULL;
	}
	spin_unlock(&dtl->lock);

	if (rc)
		kmem_cache_free(dtl_cache, buf);
	return rc;
}

static void dtl_disable(struct dtl *dtl)
{
	spin_lock(&dtl->lock);
	dtl_stop(dtl);
	kmem_cache_free(dtl_cache, dtl->buf);
	dtl->buf = NULL;
	dtl->buf_entries = 0;
	spin_unlock(&dtl->lock);
}

/* file interface */

static int dtl_file_open(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	int rc;

	rc = dtl_enable(dtl);
	if (rc)
		return rc;

	filp->private_data = dtl;
	return 0;
}

static int dtl_file_release(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	dtl_disable(dtl);
	return 0;
}

static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
		loff_t *pos)
{
	long int rc, n_read, n_req, read_size;
	struct dtl *dtl;
	u64 cur_idx, last_idx, i;

	if ((len % sizeof(struct dtl_entry)) != 0)
		return -EINVAL;

	dtl = filp->private_data;

	/* requested number of entries to read */
	n_req = len / sizeof(struct dtl_entry);

	/* actual number of entries read */
	n_read = 0;

	spin_lock(&dtl->lock);

	cur_idx = dtl_current_index(dtl);
	last_idx = dtl->last_idx;

	/* if the log has overtaken us, skip forward to the oldest entry
	 * still present in the buffer */
	if (last_idx + dtl->buf_entries <= cur_idx)
		last_idx = cur_idx - dtl->buf_entries + 1;

	/* don't read past the most recent entry */
	if (last_idx + n_req > cur_idx)
		n_req = cur_idx - last_idx;

	/* advance the read position now; the copies below run unlocked */
	if (n_req > 0)
		dtl->last_idx = last_idx + n_req;

	spin_unlock(&dtl->lock);

	if (n_req <= 0)
		return 0;

	i = last_idx % dtl->buf_entries;

	/* read the tail of the buffer if we've wrapped */
	if (i + n_req > dtl->buf_entries) {
		read_size = dtl->buf_entries - i;

		rc = copy_to_user(buf, &dtl->buf[i],
				read_size * sizeof(struct dtl_entry));
		if (rc)
			return -EFAULT;

		i = 0;
		n_req -= read_size;
		n_read += read_size;
		buf += read_size * sizeof(struct dtl_entry);
	}

	/* .. and now the head */
	rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
	if (rc)
		return -EFAULT;

	n_read += n_req;

	return n_read * sizeof(struct dtl_entry);
}
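
/*
 * Sketch of a userspace consumer (illustrative only, not part of this
 * file; process() is a placeholder). Reads must be a whole multiple of
 * sizeof(struct dtl_entry), and each read returns only entries logged
 * since the previous read:
 *
 *	int fd = open("/sys/kernel/debug/powerpc/dtl/cpu-0", O_RDONLY);
 *	struct dtl_entry ent;
 *
 *	while (read(fd, &ent, sizeof(ent)) == sizeof(ent))
 *		process(&ent);
 *
 * The path assumes debugfs is mounted at /sys/kernel/debug.
 */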

static const struct file_operations dtl_fops = {
	.open		= dtl_file_open,
	.release	= dtl_file_release,
	.read		= dtl_file_read,
	.llseek		= no_llseek,
};

static struct dentry *dtl_dir;

static int dtl_setup_file(struct dtl *dtl)
{
	char name[10];

	sprintf(name, "cpu-%d", dtl->cpu);

	dtl->file = debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
	if (!dtl->file)
		return -ENOMEM;

	return 0;
}
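
/*
 * dtl_init() below populates, under the powerpc debugfs root (normally
 * /sys/kernel/debug/powerpc):
 *
 *	dtl/dtl_event_mask	rw: event mask applied when tracing starts
 *	dtl/dtl_buf_entries	ro: per-cpu buffer size, in entries
 *	dtl/cpu-N		ro: per-cpu dispatch trace log stream
 */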

static int dtl_init(void)
{
	struct dentry *event_mask_file, *buf_entries_file;
	int rc, i;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	/* set up common debugfs structure */

	rc = -ENOMEM;
	dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root);
	if (!dtl_dir) {
		printk(KERN_WARNING "%s: can't create dtl root dir\n",
		       __func__);
		goto err;
	}

	event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
				dtl_dir, &dtl_event_mask);
	buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
				dtl_dir, &dtl_buf_entries);

	if (!event_mask_file || !buf_entries_file) {
		printk(KERN_WARNING "%s: can't create dtl files\n", __func__);
		goto err_remove_dir;
	}

	/* set up the per-cpu log structures */
	for_each_possible_cpu(i) {
		struct dtl *dtl = &per_cpu(cpu_dtl, i);
		spin_lock_init(&dtl->lock);
		dtl->cpu = i;

		rc = dtl_setup_file(dtl);
		if (rc)
			goto err_remove_dir;
	}

	return 0;

err_remove_dir:
	debugfs_remove_recursive(dtl_dir);
err:
	return rc;
}
machine_arch_initcall(pseries, dtl_init);