1
2
3
4
5
6
7
8#include <linux/perf_event.h>
9#include <linux/platform_device.h>
10#include <linux/oprofile.h>
11#include <linux/slab.h>
12
13
14
15
/*
 * Per-counter configuration written by userspace through the oprofilefs
 * files created in oprofile_perf_create_files().  op_perf_setup()
 * translates these values into the perf_event_attr used to create the
 * kernel counter.
 */
struct op_counter_config {
	unsigned long count;		/* sample period: events between samples */
	unsigned long enabled;		/* non-zero: create this counter on start */
	unsigned long event;		/* raw event number (becomes attr.config) */
	unsigned long unit_mask;	/* exposed via oprofilefs; not consumed by op_perf_setup() */
	unsigned long kernel;		/* exposed via oprofilefs; not consumed by op_perf_setup() */
	unsigned long user;		/* exposed via oprofilefs; not consumed by op_perf_setup() */
	struct perf_event_attr attr;	/* attr built from the fields above */
};
25
/* Non-zero while profiling is running; all writes guarded by the mutex. */
static int oprofile_perf_enabled;
static DEFINE_MUTEX(oprofile_perf_mutex);

/* Array of num_counters entries, allocated in oprofile_perf_init(). */
static struct op_counter_config *counter_config;
/* Per-CPU array of num_counters perf_event pointers; NULL slot = not created. */
static DEFINE_PER_CPU(struct perf_event **, perf_events);
static int num_counters;
32
33
34
35
36static void op_overflow_handler(struct perf_event *event,
37 struct perf_sample_data *data, struct pt_regs *regs)
38{
39 int id;
40 u32 cpu = smp_processor_id();
41
42 for (id = 0; id < num_counters; ++id)
43 if (per_cpu(perf_events, cpu)[id] == event)
44 break;
45
46 if (id != num_counters)
47 oprofile_add_sample(regs, id);
48 else
49 pr_warn("oprofile: ignoring spurious overflow on cpu %u\n",
50 cpu);
51}
52
53
54
55
56
57
58static void op_perf_setup(void)
59{
60 int i;
61 u32 size = sizeof(struct perf_event_attr);
62 struct perf_event_attr *attr;
63
64 for (i = 0; i < num_counters; ++i) {
65 attr = &counter_config[i].attr;
66 memset(attr, 0, size);
67 attr->type = PERF_TYPE_RAW;
68 attr->size = size;
69 attr->config = counter_config[i].event;
70 attr->sample_period = counter_config[i].count;
71 attr->pinned = 1;
72 }
73}
74
75static int op_create_counter(int cpu, int event)
76{
77 struct perf_event *pevent;
78
79 if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])
80 return 0;
81
82 pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
83 cpu, NULL,
84 op_overflow_handler, NULL);
85
86 if (IS_ERR(pevent))
87 return PTR_ERR(pevent);
88
89 if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
90 perf_event_release_kernel(pevent);
91 pr_warn("oprofile: failed to enable event %d on CPU %d\n",
92 event, cpu);
93 return -EBUSY;
94 }
95
96 per_cpu(perf_events, cpu)[event] = pevent;
97
98 return 0;
99}
100
101static void op_destroy_counter(int cpu, int event)
102{
103 struct perf_event *pevent = per_cpu(perf_events, cpu)[event];
104
105 if (pevent) {
106 perf_event_release_kernel(pevent);
107 per_cpu(perf_events, cpu)[event] = NULL;
108 }
109}
110
111
112
113
114
115static int op_perf_start(void)
116{
117 int cpu, event, ret = 0;
118
119 for_each_online_cpu(cpu) {
120 for (event = 0; event < num_counters; ++event) {
121 ret = op_create_counter(cpu, event);
122 if (ret)
123 return ret;
124 }
125 }
126
127 return ret;
128}
129
130
131
132
133static void op_perf_stop(void)
134{
135 int cpu, event;
136
137 for_each_online_cpu(cpu)
138 for (event = 0; event < num_counters; ++event)
139 op_destroy_counter(cpu, event);
140}
141
142static int oprofile_perf_create_files(struct dentry *root)
143{
144 unsigned int i;
145
146 for (i = 0; i < num_counters; i++) {
147 struct dentry *dir;
148 char buf[4];
149
150 snprintf(buf, sizeof buf, "%d", i);
151 dir = oprofilefs_mkdir(root, buf);
152 oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
153 oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
154 oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
155 oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
156 oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
157 oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
158 }
159
160 return 0;
161}
162
/*
 * oprofile "setup" hook: translate the userspace-visible counter config
 * into perf_event_attrs.  oprofilefs_lock serialises this against
 * concurrent writes to the oprofilefs files while the attrs are rebuilt.
 */
static int oprofile_perf_setup(void)
{
	raw_spin_lock(&oprofilefs_lock);
	op_perf_setup();
	raw_spin_unlock(&oprofilefs_lock);
	return 0;
}
170
171static int oprofile_perf_start(void)
172{
173 int ret = -EBUSY;
174
175 mutex_lock(&oprofile_perf_mutex);
176 if (!oprofile_perf_enabled) {
177 ret = 0;
178 op_perf_start();
179 oprofile_perf_enabled = 1;
180 }
181 mutex_unlock(&oprofile_perf_mutex);
182 return ret;
183}
184
185static void oprofile_perf_stop(void)
186{
187 mutex_lock(&oprofile_perf_mutex);
188 if (oprofile_perf_enabled)
189 op_perf_stop();
190 oprofile_perf_enabled = 0;
191 mutex_unlock(&oprofile_perf_mutex);
192}
193
194#ifdef CONFIG_PM
195
/*
 * Release the counters across system suspend; oprofile_perf_enabled is
 * deliberately left set so oprofile_perf_resume() knows to recreate them.
 */
static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
{
	mutex_lock(&oprofile_perf_mutex);
	if (oprofile_perf_enabled)
		op_perf_stop();
	mutex_unlock(&oprofile_perf_mutex);
	return 0;
}
204
205static int oprofile_perf_resume(struct platform_device *dev)
206{
207 mutex_lock(&oprofile_perf_mutex);
208 if (oprofile_perf_enabled && op_perf_start())
209 oprofile_perf_enabled = 0;
210 mutex_unlock(&oprofile_perf_mutex);
211 return 0;
212}
213
/*
 * Dummy platform driver/device pair: exists only so the PM core calls
 * our suspend/resume hooks around system sleep transitions.
 */
static struct platform_driver oprofile_driver = {
	.driver		= {
		.name		= "oprofile-perf",
	},
	.resume		= oprofile_perf_resume,
	.suspend	= oprofile_perf_suspend,
};

/* Device instance registered by init_driverfs(); freed in exit_driverfs(). */
static struct platform_device *oprofile_pdev;
223
224static int __init init_driverfs(void)
225{
226 int ret;
227
228 ret = platform_driver_register(&oprofile_driver);
229 if (ret)
230 return ret;
231
232 oprofile_pdev = platform_device_register_simple(
233 oprofile_driver.driver.name, 0, NULL, 0);
234 if (IS_ERR(oprofile_pdev)) {
235 ret = PTR_ERR(oprofile_pdev);
236 platform_driver_unregister(&oprofile_driver);
237 }
238
239 return ret;
240}
241
/* Undo init_driverfs(): drop the device first, then the driver. */
static void exit_driverfs(void)
{
	platform_device_unregister(oprofile_pdev);
	platform_driver_unregister(&oprofile_driver);
}
247
248#else
249
/* Without CONFIG_PM there is no suspend/resume, so no device is needed. */
static inline int init_driverfs(void) { return 0; }
static inline void exit_driverfs(void) { }
252
253#endif
254
255void oprofile_perf_exit(void)
256{
257 int cpu, id;
258 struct perf_event *event;
259
260 for_each_possible_cpu(cpu) {
261 for (id = 0; id < num_counters; ++id) {
262 event = per_cpu(perf_events, cpu)[id];
263 if (event)
264 perf_event_release_kernel(event);
265 }
266
267 kfree(per_cpu(perf_events, cpu));
268 }
269
270 kfree(counter_config);
271 exit_driverfs();
272}
273
/*
 * Set up oprofile on top of the perf subsystem: register the PM stub
 * device, size the counter arrays from the PMU's counter count, and
 * wire up the oprofile_operations callbacks.
 *
 * Returns 0 on success or a negative errno.  On any failure after
 * init_driverfs() succeeds, the shared 'out' path releases all partial
 * allocations via oprofile_perf_exit().
 */
int __init oprofile_perf_init(struct oprofile_operations *ops)
{
	int cpu, ret = 0;

	ret = init_driverfs();
	if (ret)
		return ret;

	num_counters = perf_num_counters();
	if (num_counters <= 0) {
		pr_info("oprofile: no performance counters\n");
		ret = -ENODEV;
		goto out;
	}

	counter_config = kcalloc(num_counters,
			sizeof(struct op_counter_config), GFP_KERNEL);

	if (!counter_config) {
		pr_info("oprofile: failed to allocate %d "
				"counters\n", num_counters);
		ret = -ENOMEM;
		/* keep exit's loops from touching never-allocated arrays */
		num_counters = 0;
		goto out;
	}

	for_each_possible_cpu(cpu) {
		per_cpu(perf_events, cpu) = kcalloc(num_counters,
				sizeof(struct perf_event *), GFP_KERNEL);
		if (!per_cpu(perf_events, cpu)) {
			pr_info("oprofile: failed to allocate %d perf events "
					"for cpu %d\n", num_counters, cpu);
			ret = -ENOMEM;
			goto out;
		}
	}

	ops->create_files = oprofile_perf_create_files;
	ops->setup = oprofile_perf_setup;
	ops->start = oprofile_perf_start;
	ops->stop = oprofile_perf_stop;
	ops->shutdown = oprofile_perf_stop;
	/* arch-provided PMU name; NULL means this PMU is not supported */
	ops->cpu_type = op_name_from_perf_id();

	if (!ops->cpu_type)
		ret = -ENODEV;
	else
		pr_info("oprofile: using %s\n", ops->cpu_type);

out:
	/* single unwind point: frees whatever was allocated above */
	if (ret)
		oprofile_perf_exit();

	return ret;
}
329