1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#define pr_fmt(fmt) "xen_cpu: " fmt
35
36#include <linux/interrupt.h>
37#include <linux/spinlock.h>
38#include <linux/cpu.h>
39#include <linux/stat.h>
40#include <linux/capability.h>
41
42#include <xen/xen.h>
43#include <xen/acpi.h>
44#include <xen/xenbus.h>
45#include <xen/events.h>
46#include <xen/interface/platform.h>
47#include <asm/xen/hypervisor.h>
48#include <asm/xen/hypercall.h>
49
50
51
52
53
54
55
56
/*
 * Driver-private representation of one Xen physical CPU.
 *
 * Lives on the global xen_pcpus list and is exposed to userspace as a
 * device on the "xen_cpu" bus.  Freed from pcpu_release() when the last
 * reference to the embedded device is dropped.
 */
struct pcpu {
	struct list_head list;	/* entry in the global xen_pcpus list */
	struct device dev;	/* embedded device; dev.id is set to cpu_id */
	uint32_t cpu_id;	/* physical CPU id as reported by Xen */
	uint32_t flags;		/* XEN_PCPU_FLAGS_* snapshot from the hypervisor */
};
63
/* Bus/subsystem for Xen physical CPU devices (registered in xen_pcpu_init()). */
static struct bus_type xen_pcpu_subsys = {
	.name = "xen_cpu",
	.dev_name = "xen_cpu",
};
68
/* Serializes all updates to the xen_pcpus list below. */
static DEFINE_MUTEX(xen_pcpu_lock);

/* All currently known (valid) physical CPUs, as reported by Xen. */
static LIST_HEAD(xen_pcpus);
72
73static int xen_pcpu_down(uint32_t cpu_id)
74{
75 struct xen_platform_op op = {
76 .cmd = XENPF_cpu_offline,
77 .interface_version = XENPF_INTERFACE_VERSION,
78 .u.cpu_ol.cpuid = cpu_id,
79 };
80
81 return HYPERVISOR_platform_op(&op);
82}
83
84static int xen_pcpu_up(uint32_t cpu_id)
85{
86 struct xen_platform_op op = {
87 .cmd = XENPF_cpu_online,
88 .interface_version = XENPF_INTERFACE_VERSION,
89 .u.cpu_ol.cpuid = cpu_id,
90 };
91
92 return HYPERVISOR_platform_op(&op);
93}
94
95static ssize_t online_show(struct device *dev,
96 struct device_attribute *attr,
97 char *buf)
98{
99 struct pcpu *cpu = container_of(dev, struct pcpu, dev);
100
101 return sprintf(buf, "%u\n", !!(cpu->flags & XEN_PCPU_FLAGS_ONLINE));
102}
103
104static ssize_t __ref online_store(struct device *dev,
105 struct device_attribute *attr,
106 const char *buf, size_t count)
107{
108 struct pcpu *pcpu = container_of(dev, struct pcpu, dev);
109 unsigned long long val;
110 ssize_t ret;
111
112 if (!capable(CAP_SYS_ADMIN))
113 return -EPERM;
114
115 if (kstrtoull(buf, 0, &val) < 0)
116 return -EINVAL;
117
118 switch (val) {
119 case 0:
120 ret = xen_pcpu_down(pcpu->cpu_id);
121 break;
122 case 1:
123 ret = xen_pcpu_up(pcpu->cpu_id);
124 break;
125 default:
126 ret = -EINVAL;
127 }
128
129 if (ret >= 0)
130 ret = count;
131 return ret;
132}
133static DEVICE_ATTR_RW(online);
134
/* sysfs attributes attached to each xen_cpu device. */
static struct attribute *pcpu_dev_attrs[] = {
	&dev_attr_online.attr,
	NULL
};
139
140static umode_t pcpu_dev_is_visible(struct kobject *kobj,
141 struct attribute *attr, int idx)
142{
143 struct device *dev = kobj_to_dev(kobj);
144
145
146
147
148
149 return dev->id ? attr->mode : 0;
150}
151
/* Attribute group filtered per-device by pcpu_dev_is_visible(). */
static const struct attribute_group pcpu_dev_group = {
	.attrs = pcpu_dev_attrs,
	.is_visible = pcpu_dev_is_visible,
};

/* Default groups attached to every pcpu device at registration. */
static const struct attribute_group *pcpu_dev_groups[] = {
	&pcpu_dev_group,
	NULL
};
161
162static bool xen_pcpu_online(uint32_t flags)
163{
164 return !!(flags & XEN_PCPU_FLAGS_ONLINE);
165}
166
167static void pcpu_online_status(struct xenpf_pcpuinfo *info,
168 struct pcpu *pcpu)
169{
170 if (xen_pcpu_online(info->flags) &&
171 !xen_pcpu_online(pcpu->flags)) {
172
173 pcpu->flags |= XEN_PCPU_FLAGS_ONLINE;
174 kobject_uevent(&pcpu->dev.kobj, KOBJ_ONLINE);
175 } else if (!xen_pcpu_online(info->flags) &&
176 xen_pcpu_online(pcpu->flags)) {
177
178 pcpu->flags &= ~XEN_PCPU_FLAGS_ONLINE;
179 kobject_uevent(&pcpu->dev.kobj, KOBJ_OFFLINE);
180 }
181}
182
183static struct pcpu *get_pcpu(uint32_t cpu_id)
184{
185 struct pcpu *pcpu;
186
187 list_for_each_entry(pcpu, &xen_pcpus, list) {
188 if (pcpu->cpu_id == cpu_id)
189 return pcpu;
190 }
191
192 return NULL;
193}
194
/*
 * Device-core release callback: runs when the last reference to the
 * embedded struct device is dropped (e.g. after device_unregister()).
 * Unlinks the pcpu from the global list and frees it.
 *
 * NOTE(review): the list_del() here relies on the surrounding context for
 * xen_pcpu_lock protection — confirm every unregister path holds the lock.
 */
static void pcpu_release(struct device *dev)
{
	struct pcpu *pcpu = container_of(dev, struct pcpu, dev);

	list_del(&pcpu->list);
	kfree(pcpu);
}
202
203static void unregister_and_remove_pcpu(struct pcpu *pcpu)
204{
205 struct device *dev;
206
207 if (!pcpu)
208 return;
209
210 dev = &pcpu->dev;
211
212 device_unregister(dev);
213}
214
215static int register_pcpu(struct pcpu *pcpu)
216{
217 struct device *dev;
218 int err = -EINVAL;
219
220 if (!pcpu)
221 return err;
222
223 dev = &pcpu->dev;
224 dev->bus = &xen_pcpu_subsys;
225 dev->id = pcpu->cpu_id;
226 dev->release = pcpu_release;
227 dev->groups = pcpu_dev_groups;
228
229 err = device_register(dev);
230 if (err) {
231 pcpu_release(dev);
232 return err;
233 }
234
235 return 0;
236}
237
/*
 * Allocate a struct pcpu for the physical CPU described by @info, link it
 * into xen_pcpus, and register it with the driver core.
 *
 * Returns the new pcpu on success, or an ERR_PTR() on failure.  On
 * registration failure, register_pcpu()'s error path releases the device,
 * whose pcpu_release() callback unlinks and frees the allocation — so the
 * stale pointer must not be touched; only the ERR_PTR is returned.
 */
static struct pcpu *create_and_register_pcpu(struct xenpf_pcpuinfo *info)
{
	struct pcpu *pcpu;
	int err;

	if (info->flags & XEN_PCPU_FLAGS_INVALID)
		return ERR_PTR(-ENODEV);

	pcpu = kzalloc(sizeof(struct pcpu), GFP_KERNEL);
	if (!pcpu)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&pcpu->list);
	pcpu->cpu_id = info->xen_cpuid;
	pcpu->flags = info->flags;

	/* Must be linked before register_pcpu(): pcpu_release() does list_del(). */
	list_add_tail(&pcpu->list, &xen_pcpus);

	err = register_pcpu(pcpu);
	if (err) {
		pr_warn("Failed to register pcpu%u\n", info->xen_cpuid);
		return ERR_PTR(-ENOENT);
	}

	return pcpu;
}
265
266
267
268
/*
 * Query Xen for the current state of physical CPU @cpu and bring our
 * local view in sync: create, destroy, or update the matching struct pcpu.
 *
 * If @max_cpu is non-NULL it is updated with the highest present physical
 * CPU id reported by the hypervisor, so the caller can bound its scan.
 *
 * Caller holds xen_pcpu_lock (see xen_sync_pcpus()).  Returns 0 on
 * success or a negative error code.
 */
static int sync_pcpu(uint32_t cpu, uint32_t *max_cpu)
{
	int ret;
	struct pcpu *pcpu = NULL;
	struct xenpf_pcpuinfo *info;
	struct xen_platform_op op = {
		.cmd = XENPF_get_cpuinfo,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.pcpu_info.xen_cpuid = cpu,
	};

	ret = HYPERVISOR_platform_op(&op);
	if (ret)
		return ret;

	info = &op.u.pcpu_info;
	if (max_cpu)
		*max_cpu = info->max_present;

	pcpu = get_pcpu(cpu);

	/*
	 * The hypervisor marks a hot-removed (or never-present) CPU as
	 * invalid; drop any stale device we may still have for it.
	 */
	if (info->flags & XEN_PCPU_FLAGS_INVALID) {
		unregister_and_remove_pcpu(pcpu);
		return 0;
	}

	if (!pcpu) {
		/* First time we see this CPU: create and expose it. */
		pcpu = create_and_register_pcpu(info);
		if (IS_ERR_OR_NULL(pcpu))
			return -ENODEV;
	} else
		pcpu_online_status(info, pcpu);

	return 0;
}
307
308
309
310
311static int xen_sync_pcpus(void)
312{
313
314
315
316 uint32_t cpu = 0, max_cpu = 0;
317 int err = 0;
318 struct pcpu *pcpu, *tmp;
319
320 mutex_lock(&xen_pcpu_lock);
321
322 while (!err && (cpu <= max_cpu)) {
323 err = sync_pcpu(cpu, &max_cpu);
324 cpu++;
325 }
326
327 if (err)
328 list_for_each_entry_safe(pcpu, tmp, &xen_pcpus, list)
329 unregister_and_remove_pcpu(pcpu);
330
331 mutex_unlock(&xen_pcpu_lock);
332
333 return err;
334}
335
/* Process-context worker: resync all physical CPU state with Xen. */
static void xen_pcpu_work_fn(struct work_struct *work)
{
	xen_sync_pcpus();
}
static DECLARE_WORK(xen_pcpu_work, xen_pcpu_work_fn);
341
/*
 * VIRQ_PCPU_STATE handler: a physical CPU changed state.  Defer the
 * actual resync to process context, since it issues hypercalls and takes
 * xen_pcpu_lock (a sleeping mutex).
 */
static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id)
{
	schedule_work(&xen_pcpu_work);
	return IRQ_HANDLED;
}
347
/*
 * Module init.  Only runs in the initial domain (dom0), which is the only
 * domain that may manage physical CPUs.  Binds VIRQ_PCPU_STATE so the
 * hypervisor can notify us of physical CPU state changes, registers the
 * xen_cpu subsystem, and performs an initial full sync.
 */
static int __init xen_pcpu_init(void)
{
	int irq, ret;

	if (!xen_initial_domain())
		return -ENODEV;

	irq = bind_virq_to_irqhandler(VIRQ_PCPU_STATE, 0,
				      xen_pcpu_interrupt, 0,
				      "xen-pcpu", NULL);
	if (irq < 0) {
		pr_warn("Failed to bind pcpu virq\n");
		return irq;
	}

	ret = subsys_system_register(&xen_pcpu_subsys, NULL);
	if (ret) {
		pr_warn("Failed to register pcpu subsys\n");
		goto err1;
	}

	ret = xen_sync_pcpus();
	if (ret) {
		pr_warn("Failed to sync pcpu info\n");
		goto err2;
	}

	return 0;

	/* Unwind in reverse order of setup. */
err2:
	bus_unregister(&xen_pcpu_subsys);
err1:
	unbind_from_irqhandler(irq, NULL);
	return ret;
}
arch_initcall(xen_pcpu_init);
384