1
2
3
4
5
6
7#include <linux/acpi.h>
8#include <linux/crash_dump.h>
9#include <linux/visorbus.h>
10
11#include "visorbus_private.h"
12
13
14#define VISOR_SIOVM_GUID GUID_INIT(0x72120008, 0x4AAB, 0x11DC, 0x85, 0x30, \
15 0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
16
17static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;
18static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;
19static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;
20
21#define POLLJIFFIES_CONTROLVM_FAST 1
22#define POLLJIFFIES_CONTROLVM_SLOW 100
23
24#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
25
26#define UNISYS_VISOR_LEAF_ID 0x40000000
27
28
29#define UNISYS_VISOR_ID_EBX 0x73696e55
30#define UNISYS_VISOR_ID_ECX 0x70537379
31#define UNISYS_VISOR_ID_EDX 0x34367261
32
33
34
35
36
37
38#define MIN_IDLE_SECONDS 10
39
/*
 * State for parsing a buffered copy of a controlvm message payload.
 * The payload bytes are copied into (and beyond) @data, and @curr /
 * @bytes_remaining track the parse position within that copy.
 */
struct parser_context {
	unsigned long allocbytes;	/* total bytes allocated for this context */
	unsigned long param_bytes;	/* size of the payload copied into @data */
	u8 *curr;			/* current parse cursor into the payload */
	unsigned long bytes_remaining;	/* bytes left to parse from @curr */
	bool byte_stream;		/* payload is a raw byte stream (no header) */
	struct visor_controlvm_parameters_header data;
};
48
49
50#define VMCALL_CONTROLVM_ADDR 0x0501
51
/* Result codes returned in EAX by the hypervisor VMCALL interface. */
enum vmcall_result {
	VMCALL_RESULT_SUCCESS = 0,
	VMCALL_RESULT_INVALID_PARAM = 1,
	VMCALL_RESULT_DATA_UNAVAILABLE = 2,
	VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
	VMCALL_RESULT_DEVICE_ERROR = 4,
	VMCALL_RESULT_DEVICE_NOT_READY = 5
};
60
61
62
63
64
65
66
67
68
69
70
71
72
/*
 * Parameter block for the VMCALL_CONTROLVM_ADDR vmcall; the hypervisor
 * fills in the physical address and size of the controlvm channel.
 */
struct vmcall_io_controlvm_addr_params {
	u64 address;		/* physical address of the controlvm channel */
	u32 channel_bytes;	/* size of the controlvm channel in bytes */
	u8 unused[4];		/* pad to 16 bytes */
} __packed;
78
/* Per-driver state for the single visorchipset ACPI device instance. */
struct visorchipset_device {
	struct acpi_device *acpi_device;
	/* current polling period for the periodic controlvm work */
	unsigned long poll_jiffies;
	/* when we last saw controlvm activity; used to adjust poll rate */
	unsigned long most_recent_message_jiffies;
	struct delayed_work periodic_controlvm_work;
	struct visorchannel *controlvm_channel;
	/* payload bytes currently held in parser contexts (capped at
	 * MAX_CONTROLVM_PAYLOAD_BYTES)
	 */
	unsigned long controlvm_payload_bytes_buffered;
	/* a message we could not respond to immediately; valid only while
	 * controlvm_pending_msg_valid is set
	 */
	struct controlvm_message controlvm_pending_msg;
	bool controlvm_pending_msg_valid;
	/* parameter block handed to the VMCALL_CONTROLVM_ADDR vmcall */
	struct vmcall_io_controlvm_addr_params controlvm_params;
};
98
99static struct visorchipset_device *chipset_dev;
100
/*
 * A deferred parahotplug (PCI device enable/disable) request; queued on
 * parahotplug_request_list until userspace completes it via sysfs.
 */
struct parahotplug_request {
	struct list_head list;		/* link on parahotplug_request_list */
	int id;				/* unique id matched by the sysfs write */
	unsigned long expiration;	/* jiffies after which the request is stale */
	struct controlvm_message msg;	/* original message, answered on completion */
};
107
108
109static ssize_t toolaction_show(struct device *dev,
110 struct device_attribute *attr,
111 char *buf)
112{
113 u8 tool_action = 0;
114 int err;
115
116 err = visorchannel_read(chipset_dev->controlvm_channel,
117 offsetof(struct visor_controlvm_channel,
118 tool_action),
119 &tool_action, sizeof(u8));
120 if (err)
121 return err;
122 return sprintf(buf, "%u\n", tool_action);
123}
124
125static ssize_t toolaction_store(struct device *dev,
126 struct device_attribute *attr,
127 const char *buf, size_t count)
128{
129 u8 tool_action;
130 int err;
131
132 if (kstrtou8(buf, 10, &tool_action))
133 return -EINVAL;
134 err = visorchannel_write(chipset_dev->controlvm_channel,
135 offsetof(struct visor_controlvm_channel,
136 tool_action),
137 &tool_action, sizeof(u8));
138 if (err)
139 return err;
140 return count;
141}
142static DEVICE_ATTR_RW(toolaction);
143
144static ssize_t boottotool_show(struct device *dev,
145 struct device_attribute *attr,
146 char *buf)
147{
148 struct efi_visor_indication efi_visor_indication;
149 int err;
150
151 err = visorchannel_read(chipset_dev->controlvm_channel,
152 offsetof(struct visor_controlvm_channel,
153 efi_visor_ind),
154 &efi_visor_indication,
155 sizeof(struct efi_visor_indication));
156 if (err)
157 return err;
158 return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
159}
160
161static ssize_t boottotool_store(struct device *dev,
162 struct device_attribute *attr,
163 const char *buf, size_t count)
164{
165 int val, err;
166 struct efi_visor_indication efi_visor_indication;
167
168 if (kstrtoint(buf, 10, &val))
169 return -EINVAL;
170 efi_visor_indication.boot_to_tool = val;
171 err = visorchannel_write(chipset_dev->controlvm_channel,
172 offsetof(struct visor_controlvm_channel,
173 efi_visor_ind),
174 &(efi_visor_indication),
175 sizeof(struct efi_visor_indication));
176 if (err)
177 return err;
178 return count;
179}
180static DEVICE_ATTR_RW(boottotool);
181
182static ssize_t error_show(struct device *dev, struct device_attribute *attr,
183 char *buf)
184{
185 u32 error = 0;
186 int err;
187
188 err = visorchannel_read(chipset_dev->controlvm_channel,
189 offsetof(struct visor_controlvm_channel,
190 installation_error),
191 &error, sizeof(u32));
192 if (err)
193 return err;
194 return sprintf(buf, "%u\n", error);
195}
196
197static ssize_t error_store(struct device *dev, struct device_attribute *attr,
198 const char *buf, size_t count)
199{
200 u32 error;
201 int err;
202
203 if (kstrtou32(buf, 10, &error))
204 return -EINVAL;
205 err = visorchannel_write(chipset_dev->controlvm_channel,
206 offsetof(struct visor_controlvm_channel,
207 installation_error),
208 &error, sizeof(u32));
209 if (err)
210 return err;
211 return count;
212}
213static DEVICE_ATTR_RW(error);
214
215static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
216 char *buf)
217{
218 u32 text_id = 0;
219 int err;
220
221 err = visorchannel_read(chipset_dev->controlvm_channel,
222 offsetof(struct visor_controlvm_channel,
223 installation_text_id),
224 &text_id, sizeof(u32));
225 if (err)
226 return err;
227 return sprintf(buf, "%u\n", text_id);
228}
229
230static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
231 const char *buf, size_t count)
232{
233 u32 text_id;
234 int err;
235
236 if (kstrtou32(buf, 10, &text_id))
237 return -EINVAL;
238 err = visorchannel_write(chipset_dev->controlvm_channel,
239 offsetof(struct visor_controlvm_channel,
240 installation_text_id),
241 &text_id, sizeof(u32));
242 if (err)
243 return err;
244 return count;
245}
246static DEVICE_ATTR_RW(textid);
247
248static ssize_t remaining_steps_show(struct device *dev,
249 struct device_attribute *attr, char *buf)
250{
251 u16 remaining_steps = 0;
252 int err;
253
254 err = visorchannel_read(chipset_dev->controlvm_channel,
255 offsetof(struct visor_controlvm_channel,
256 installation_remaining_steps),
257 &remaining_steps, sizeof(u16));
258 if (err)
259 return err;
260 return sprintf(buf, "%hu\n", remaining_steps);
261}
262
263static ssize_t remaining_steps_store(struct device *dev,
264 struct device_attribute *attr,
265 const char *buf, size_t count)
266{
267 u16 remaining_steps;
268 int err;
269
270 if (kstrtou16(buf, 10, &remaining_steps))
271 return -EINVAL;
272 err = visorchannel_write(chipset_dev->controlvm_channel,
273 offsetof(struct visor_controlvm_channel,
274 installation_remaining_steps),
275 &remaining_steps, sizeof(u16));
276 if (err)
277 return err;
278 return count;
279}
280static DEVICE_ATTR_RW(remaining_steps);
281
282static void controlvm_init_response(struct controlvm_message *msg,
283 struct controlvm_message_header *msg_hdr,
284 int response)
285{
286 memset(msg, 0, sizeof(struct controlvm_message));
287 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
288 msg->hdr.payload_bytes = 0;
289 msg->hdr.payload_vm_offset = 0;
290 msg->hdr.payload_max_bytes = 0;
291 if (response < 0) {
292 msg->hdr.flags.failed = 1;
293 msg->hdr.completion_status = (u32)(-response);
294 }
295}
296
297static int controlvm_respond_chipset_init(
298 struct controlvm_message_header *msg_hdr,
299 int response,
300 enum visor_chipset_feature features)
301{
302 struct controlvm_message outmsg;
303
304 controlvm_init_response(&outmsg, msg_hdr, response);
305 outmsg.cmd.init_chipset.features = features;
306 return visorchannel_signalinsert(chipset_dev->controlvm_channel,
307 CONTROLVM_QUEUE_REQUEST, &outmsg);
308}
309
/*
 * chipset_init() - handle a CONTROLVM_CHIPSET_INIT message
 * @inmsg: the init_chipset message
 *
 * May only succeed once; a second call reports ALREADY_DONE back to the
 * sender.  Negotiates the feature set: accepts PARA_HOTPLUG if offered
 * and always advertises REPLY.
 *
 * Return: 0, or a negative errno from sending the response (or -EIO on a
 * duplicate init when no response was expected).
 */
static int chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum visor_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;
	int res = 0;

	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ALREADY_DONE;
		res = -EIO;
		goto out_respond;
	}
	chipset_inited = 1;
	/* Keep only the hotplug feature bit the guest offered us. */
	features = inmsg->cmd.init_chipset.features &
		   VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
	/* We always answer chipset messages with a response message. */
	features |= VISOR_CHIPSET_FEATURE_REPLY;

out_respond:
	if (inmsg->hdr.flags.response_expected)
		res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);

	return res;
}
338
339static int controlvm_respond(struct controlvm_message_header *msg_hdr,
340 int response, struct visor_segment_state *state)
341{
342 struct controlvm_message outmsg;
343
344 controlvm_init_response(&outmsg, msg_hdr, response);
345 if (outmsg.hdr.flags.test_message == 1)
346 return -EINVAL;
347 if (state) {
348 outmsg.cmd.device_change_state.state = *state;
349 outmsg.cmd.device_change_state.flags.phys_device = 1;
350 }
351 return visorchannel_signalinsert(chipset_dev->controlvm_channel,
352 CONTROLVM_QUEUE_REQUEST, &outmsg);
353}
354
/* Which kind of crash-recovery message is being saved. */
enum crash_obj_type {
	CRASH_DEV,	/* a create_device message */
	CRASH_BUS,	/* a create_bus message */
};
359
/*
 * save_crash_message() - preserve a create message in the channel's crash
 * area so devices can be recreated after a kdump
 * @msg: the create_bus/create_device message to save
 * @cr_type: CRASH_BUS or CRASH_DEV (selects which slot is written)
 *
 * The crash area holds CONTROLVM_CRASHMSG_MAX messages: the bus message
 * in the first slot and the device message immediately after it.
 *
 * Return: 0 on success, negative errno on channel access failure.
 */
static int save_crash_message(struct controlvm_message *msg,
			      enum crash_obj_type cr_type)
{
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 saved_crash_message_count),
				&local_crash_msg_count, sizeof(u16));
	if (err) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read message count\n");
		return err;
	}
	/* Sanity-check that the channel's crash area has the expected layout. */
	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		dev_err(&chipset_dev->acpi_device->dev,
			"invalid number of messages\n");
		return -EIO;
	}
	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 saved_crash_message_offset),
				&local_crash_msg_offset, sizeof(u32));
	if (err) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read offset\n");
		return err;
	}
	switch (cr_type) {
	case CRASH_DEV:
		/* Device message lives in the slot after the bus message. */
		local_crash_msg_offset += sizeof(struct controlvm_message);
		err = visorchannel_write(chipset_dev->controlvm_channel,
					 local_crash_msg_offset, msg,
					 sizeof(struct controlvm_message));
		if (err) {
			dev_err(&chipset_dev->acpi_device->dev,
				"failed to write dev msg\n");
			return err;
		}
		break;
	case CRASH_BUS:
		err = visorchannel_write(chipset_dev->controlvm_channel,
					 local_crash_msg_offset, msg,
					 sizeof(struct controlvm_message));
		if (err) {
			dev_err(&chipset_dev->acpi_device->dev,
				"failed to write bus msg\n");
			return err;
		}
		break;
	default:
		dev_err(&chipset_dev->acpi_device->dev,
			"Invalid crash_obj_type\n");
		break;
	}
	return 0;
}
419
420static int controlvm_responder(enum controlvm_id cmd_id,
421 struct controlvm_message_header *pending_msg_hdr,
422 int response)
423{
424 if (pending_msg_hdr->id != (u32)cmd_id)
425 return -EINVAL;
426
427 return controlvm_respond(pending_msg_hdr, response, NULL);
428}
429
430static int device_changestate_responder(enum controlvm_id cmd_id,
431 struct visor_device *p, int response,
432 struct visor_segment_state state)
433{
434 struct controlvm_message outmsg;
435
436 if (p->pending_msg_hdr->id != cmd_id)
437 return -EINVAL;
438
439 controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
440 outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
441 outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
442 outmsg.cmd.device_change_state.state = state;
443 return visorchannel_signalinsert(chipset_dev->controlvm_channel,
444 CONTROLVM_QUEUE_REQUEST, &outmsg);
445}
446
/*
 * visorbus_create() - handle a CONTROLVM_BUS_CREATE message
 * @inmsg: the create_bus message
 *
 * Allocates bus bookkeeping, creates the backing visorchannel, and
 * registers the bus instance.  When a response is expected, the header is
 * saved so visorbus_response() can answer later; on failure, a response
 * is sent immediately from err_respond.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int visorbus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->create_bus.bus_no;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int err;

	/* Reject creation of a bus that already exists. */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && bus_info->state.created == 1) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed %s: already exists\n", __func__);
		err = -EEXIST;
		goto err_respond;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		err = -ENOMEM;
		goto err_respond;
	}
	INIT_LIST_HEAD(&bus_info->list_all);
	bus_info->chipset_bus_no = bus_no;
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
	/* SIOVM buses have their create message preserved for kdump recovery. */
	if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) {
		err = save_crash_message(inmsg, CRASH_BUS);
		if (err)
			goto err_free_bus_info;
	}
	/* Save the header so the response can be deferred until creation
	 * completes (see visorbus_response()).
	 */
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_free_bus_info;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}
	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   GFP_KERNEL,
					   &cmd->create_bus.bus_data_type_guid,
					   false);
	if (!visorchannel) {
		err = -ENOMEM;
		goto err_free_pending_msg;
	}
	bus_info->visorchannel = visorchannel;

	err = visorbus_create_instance(bus_info);
	if (err)
		goto err_destroy_channel;
	return 0;

err_destroy_channel:
	visorchannel_destroy(visorchannel);

err_free_pending_msg:
	kfree(bus_info->pending_msg_hdr);

err_free_bus_info:
	kfree(bus_info);

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
515
/*
 * visorbus_destroy() - handle a CONTROLVM_BUS_DESTROY message
 * @inmsg: the destroy_bus message
 *
 * Validates the target bus, optionally saves the header for a deferred
 * response, and tears down the bus instance.
 *
 * Return: 0 on success, negative errno on failure (a failure response is
 * sent when one was expected).
 */
static int visorbus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = inmsg->cmd.destroy_bus.bus_no;
	struct visor_device *bus_info;
	int err;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		err = -ENODEV;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		err = -ENOENT;
		goto err_respond;
	}
	if (bus_info->pending_msg_hdr) {
		/* A previous controlvm message for this bus is still in flight. */
		err = -EEXIST;
		goto err_respond;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_respond;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}

	/* Response (if any) is sent later via visorbus_response(). */
	visorbus_remove_instance(bus_info);
	return 0;

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
556
557static const guid_t *parser_id_get(struct parser_context *ctx)
558{
559 return &ctx->data.id;
560}
561
562static void *parser_string_get(u8 *pscan, int nscan)
563{
564 int value_length;
565 void *value;
566
567 if (nscan == 0)
568 return NULL;
569
570 value_length = strnlen(pscan, nscan);
571 value = kzalloc(value_length + 1, GFP_KERNEL);
572 if (!value)
573 return NULL;
574 if (value_length > 0)
575 memcpy(value, pscan, value_length);
576 return value;
577}
578
579static void *parser_name_get(struct parser_context *ctx)
580{
581 struct visor_controlvm_parameters_header *phdr;
582
583 phdr = &ctx->data;
584 if ((unsigned long)phdr->name_offset +
585 (unsigned long)phdr->name_length > ctx->param_bytes)
586 return NULL;
587 ctx->curr = (char *)&phdr + phdr->name_offset;
588 ctx->bytes_remaining = phdr->name_length;
589 return parser_string_get(ctx->curr, phdr->name_length);
590}
591
/*
 * visorbus_configure() - handle a CONTROLVM_BUS_CONFIGURE message
 * @inmsg: the configure_bus message
 * @parser_ctx: optional parsed payload carrying the partition GUID and
 *              bus name; may be NULL
 *
 * Points the bus's channel at the client partition and records the
 * partition identity.  Always responds immediately when a response is
 * expected (success and failure alike).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int visorbus_configure(struct controlvm_message *inmsg,
			      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visor_device *bus_info;
	int err = 0;

	bus_no = cmd->configure_bus.bus_no;
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		err = -EINVAL;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		err = -EINVAL;
		goto err_respond;
	}
	/* Refuse to reconfigure while another message is still pending. */
	if (bus_info->pending_msg_hdr) {
		err = -EIO;
		goto err_respond;
	}
	err = visorchannel_set_clientpartition(bus_info->visorchannel,
					       cmd->configure_bus.guest_handle);
	if (err)
		goto err_respond;
	if (parser_ctx) {
		const guid_t *partition_guid = parser_id_get(parser_ctx);

		guid_copy(&bus_info->partition_guid, partition_guid);
		/* Caller-visible name; freed in visorbus_device_destroy(). */
		bus_info->name = parser_name_get(parser_ctx);
	}
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return 0;

err_respond:
	dev_err(&chipset_dev->acpi_device->dev,
		"%s exited with err: %d\n", __func__, err);
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
635
/*
 * visorbus_device_create() - handle a CONTROLVM_DEVICE_CREATE message
 * @inmsg: the create_device message
 *
 * Validates the parent bus, allocates device bookkeeping and its backing
 * visorchannel, optionally preserves the message for kdump (vHBA devices),
 * and registers the device.  When a response is expected, the header is
 * saved for a deferred response; failures are answered immediately.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int visorbus_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visor_device *dev_info;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int err;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to get bus by id: %d\n", bus_no);
		err = -ENODEV;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"bus not created, id: %d\n", bus_no);
		err = -EINVAL;
		goto err_respond;
	}
	/* Reject duplicate creation of an already-created device. */
	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (dev_info && dev_info->state.created == 1) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to get bus by id: %d/%d\n", bus_no, dev_no);
		err = -EEXIST;
		goto err_respond;
	}

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		err = -ENOMEM;
		goto err_respond;
	}
	dev_info->chipset_bus_no = bus_no;
	dev_info->chipset_dev_no = dev_no;
	guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid);
	dev_info->device.parent = &bus_info->device;
	visorchannel = visorchannel_create(cmd->create_device.channel_addr,
					   GFP_KERNEL,
					   &cmd->create_device.data_type_guid,
					   true);
	if (!visorchannel) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to create visorchannel: %d/%d\n",
			bus_no, dev_no);
		err = -ENOMEM;
		goto err_free_dev_info;
	}
	dev_info->visorchannel = visorchannel;
	guid_copy(&dev_info->channel_type_guid,
		  &cmd->create_device.data_type_guid);
	/* vHBA create messages are preserved for kdump recovery. */
	if (guid_equal(&cmd->create_device.data_type_guid,
		       &visor_vhba_channel_guid)) {
		err = save_crash_message(inmsg, CRASH_DEV);
		if (err)
			goto err_destroy_visorchannel;
	}
	/* Save the header so the response can be deferred (see
	 * visorbus_device_changestate_response()).
	 */
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_destroy_visorchannel;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}

	err = create_visor_device(dev_info);
	if (err)
		goto err_destroy_visorchannel;

	return 0;

err_destroy_visorchannel:
	visorchannel_destroy(visorchannel);

err_free_dev_info:
	kfree(dev_info);

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
725
/*
 * visorbus_device_changestate() - handle CONTROLVM_DEVICE_CHANGESTATE
 * @inmsg: the device_change_state message
 *
 * Maps the requested segment state onto a device resume (running) or
 * pause (standby).  Any other state is silently treated as success.
 * When a response is expected, the header is saved for a deferred
 * response issued from visorbus_device_changestate_response().
 *
 * NOTE(review): if resume/pause fails after pending_msg_hdr has been
 * saved, the header is not freed here — presumably cleaned up elsewhere;
 * verify against the response path.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int visorbus_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct visor_segment_state state = cmd->device_change_state.state;
	struct visor_device *dev_info;
	int err = 0;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		err = -ENODEV;
		goto err_respond;
	}
	if (dev_info->state.created == 0) {
		err = -EINVAL;
		goto err_respond;
	}
	if (dev_info->pending_msg_hdr) {
		/* Only one state change at a time per device. */
		err = -EIO;
		goto err_respond;
	}

	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_respond;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}
	/* "running" -> resume the device driver */
	if (state.alive == segment_state_running.alive &&
	    state.operating == segment_state_running.operating)

		err = visorchipset_device_resume(dev_info);

	/* "standby" -> pause the device driver */
	else if (state.alive == segment_state_standby.alive &&
		 state.operating == segment_state_standby.operating)

		err = visorchipset_device_pause(dev_info);
	if (err)
		goto err_respond;
	return 0;

err_respond:
	dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err);
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
783
/*
 * visorbus_device_destroy() - handle a CONTROLVM_DEVICE_DESTROY message
 * @inmsg: the destroy_device message
 *
 * Validates the target device, optionally saves the header for a
 * deferred response, frees the device name, and removes the device.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int visorbus_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->destroy_device.bus_no;
	u32 dev_no = cmd->destroy_device.dev_no;
	struct visor_device *dev_info;
	int err;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		err = -ENODEV;
		goto err_respond;
	}
	if (dev_info->state.created == 0) {
		err = -EINVAL;
		goto err_respond;
	}
	if (dev_info->pending_msg_hdr) {
		/* Another message for this device is still in flight. */
		err = -EIO;
		goto err_respond;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_respond;
		}

		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}
	/* NOTE(review): name is freed but not NULLed before removal —
	 * confirm remove_visor_device() does not touch dev_info->name.
	 */
	kfree(dev_info->name);
	remove_visor_device(dev_info);
	return 0;

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
827
828
829
830
831
832
833
834
835
836
837
838
839
840#define PARAHOTPLUG_TIMEOUT_MS 2000
841
842
843
844
845
846
847
848
849static int parahotplug_next_id(void)
850{
851 static atomic_t id = ATOMIC_INIT(0);
852
853 return atomic_inc_return(&id);
854}
855
856
857
858
859
860
861
862
863static unsigned long parahotplug_next_expiration(void)
864{
865 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
866}
867
868
869
870
871
872
873
874
875
876static struct parahotplug_request *parahotplug_request_create(
877 struct controlvm_message *msg)
878{
879 struct parahotplug_request *req;
880
881 req = kmalloc(sizeof(*req), GFP_KERNEL);
882 if (!req)
883 return NULL;
884 req->id = parahotplug_next_id();
885 req->expiration = parahotplug_next_expiration();
886 req->msg = *msg;
887 return req;
888}
889
890
891
892
893
/* Free a hotplug request previously allocated by parahotplug_request_create(). */
static void parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}
898
899static LIST_HEAD(parahotplug_request_list);
900
901static DEFINE_SPINLOCK(parahotplug_request_list_lock);
902
903
904
905
906
907
908
909
910
911
912
913
/*
 * parahotplug_request_complete() - finish a queued hotplug request
 * @id: request id, as written to the deviceenabled/devicedisabled files
 * @active: active state to report back in the deferred response
 *
 * Called when userspace acknowledges a request via sysfs.  Finds the
 * matching request on parahotplug_request_list, sends the deferred
 * controlvm response, and frees the request.
 *
 * Return: 0 if the request was found and completed, -EINVAL otherwise.
 */
static int parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos;
	struct list_head *tmp;
	struct parahotplug_request *req;

	spin_lock(&parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		req = list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/*
			 * Found it.  Unlink the request, then drop the
			 * lock before issuing the response and freeing.
			 */
			list_del(pos);
			spin_unlock(&parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					&req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}
	spin_unlock(&parahotplug_request_list_lock);
	return -EINVAL;
}
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957static ssize_t devicedisabled_store(struct device *dev,
958 struct device_attribute *attr,
959 const char *buf, size_t count)
960{
961 unsigned int id;
962 int err;
963
964 if (kstrtouint(buf, 10, &id))
965 return -EINVAL;
966 err = parahotplug_request_complete(id, 0);
967 if (err < 0)
968 return err;
969 return count;
970}
971static DEVICE_ATTR_WO(devicedisabled);
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986static ssize_t deviceenabled_store(struct device *dev,
987 struct device_attribute *attr,
988 const char *buf, size_t count)
989{
990 unsigned int id;
991
992 if (kstrtouint(buf, 10, &id))
993 return -EINVAL;
994 parahotplug_request_complete(id, 1);
995 return count;
996}
997static DEVICE_ATTR_WO(deviceenabled);
998
/* Attributes published under the "install" sysfs group. */
static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static const struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

/* Attributes published under the "parahotplug" sysfs group. */
static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static const struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

/* All sysfs groups exposed on the visorchipset ACPI device. */
static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_parahotplug_group,
	NULL
};
1029
1030
1031
1032
1033
1034
1035
1036
1037static int parahotplug_request_kickoff(struct parahotplug_request *req)
1038{
1039 struct controlvm_message_packet *cmd = &req->msg.cmd;
1040 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1041 env_func[40];
1042 char *envp[] = { env_cmd, env_id, env_state, env_bus, env_dev,
1043 env_func, NULL
1044 };
1045
1046 sprintf(env_cmd, "VISOR_PARAHOTPLUG=1");
1047 sprintf(env_id, "VISOR_PARAHOTPLUG_ID=%d", req->id);
1048 sprintf(env_state, "VISOR_PARAHOTPLUG_STATE=%d",
1049 cmd->device_change_state.state.active);
1050 sprintf(env_bus, "VISOR_PARAHOTPLUG_BUS=%d",
1051 cmd->device_change_state.bus_no);
1052 sprintf(env_dev, "VISOR_PARAHOTPLUG_DEVICE=%d",
1053 cmd->device_change_state.dev_no >> 3);
1054 sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d",
1055 cmd->device_change_state.dev_no & 0x7);
1056 return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1057 KOBJ_CHANGE, envp);
1058}
1059
1060
1061
1062
1063
1064
1065static int parahotplug_process_message(struct controlvm_message *inmsg)
1066{
1067 struct parahotplug_request *req;
1068 int err;
1069
1070 req = parahotplug_request_create(inmsg);
1071 if (!req)
1072 return -ENOMEM;
1073
1074
1075
1076
1077 if (inmsg->cmd.device_change_state.state.active) {
1078 err = parahotplug_request_kickoff(req);
1079 if (err)
1080 goto err_respond;
1081 controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
1082 &inmsg->cmd.device_change_state.state);
1083 parahotplug_request_destroy(req);
1084 return 0;
1085 }
1086
1087
1088
1089
1090
1091 spin_lock(¶hotplug_request_list_lock);
1092 list_add_tail(&req->list, ¶hotplug_request_list);
1093 spin_unlock(¶hotplug_request_list_lock);
1094 err = parahotplug_request_kickoff(req);
1095 if (err)
1096 goto err_respond;
1097 return 0;
1098
1099err_respond:
1100 controlvm_respond(&inmsg->hdr, err,
1101 &inmsg->cmd.device_change_state.state);
1102 return err;
1103}
1104
1105
1106
1107
1108
1109
1110
1111
1112static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
1113{
1114 int res;
1115
1116 res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj, KOBJ_ONLINE);
1117 if (msg_hdr->flags.response_expected)
1118 controlvm_respond(msg_hdr, res, NULL);
1119 return res;
1120}
1121
1122
1123
1124
1125
1126
1127
1128
1129static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
1130{
1131 char env_selftest[20];
1132 char *envp[] = { env_selftest, NULL };
1133 int res;
1134
1135 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1136 res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1137 KOBJ_CHANGE, envp);
1138 if (msg_hdr->flags.response_expected)
1139 controlvm_respond(msg_hdr, res, NULL);
1140 return res;
1141}
1142
1143
1144
1145
1146
1147
1148
1149
1150static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
1151{
1152 int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
1153 KOBJ_OFFLINE);
1154
1155 if (msg_hdr->flags.response_expected)
1156 controlvm_respond(msg_hdr, res, NULL);
1157 return res;
1158}
1159
/*
 * unisys_vmcall() - issue a call to the Unisys hypervisor
 * @tuple: identifies which vmcall is being made (e.g. VMCALL_CONTROLVM_ADDR)
 * @param: 64-bit parameter (typically a physical address), split into
 *         EBX (low 32 bits) and ECX (high 32 bits)
 *
 * Return: 0 on success, or a negative errno translated from the
 * hypervisor's enum vmcall_result code; -EPERM if no hypervisor is
 * present.
 */
static int unisys_vmcall(unsigned long tuple, unsigned long param)
{
	int result = 0;
	unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
	unsigned long reg_ebx;
	unsigned long reg_ecx;

	reg_ebx = param & 0xFFFFFFFF;
	reg_ecx = param >> 32;
	/* CPUID leaf 1, ECX bit 31: running under a hypervisor. */
	cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
	if (!(cpuid_ecx & 0x80000000))
		return -EPERM;
	/* 0f 01 c1 is the VMCALL instruction; result comes back in EAX. */
	__asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
			     "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
	if (result)
		goto error;
	return 0;

/* Translate hypervisor result codes (enum vmcall_result) to errnos. */
error:
	switch (result) {
	case VMCALL_RESULT_INVALID_PARAM:
		return -EINVAL;
	case VMCALL_RESULT_DATA_UNAVAILABLE:
		return -ENODEV;
	default:
		return -EFAULT;
	}
}
1189
1190static int controlvm_channel_create(struct visorchipset_device *dev)
1191{
1192 struct visorchannel *chan;
1193 u64 addr;
1194 int err;
1195
1196 err = unisys_vmcall(VMCALL_CONTROLVM_ADDR,
1197 virt_to_phys(&dev->controlvm_params));
1198 if (err)
1199 return err;
1200 addr = dev->controlvm_params.address;
1201 chan = visorchannel_create(addr, GFP_KERNEL,
1202 &visor_controlvm_channel_guid, true);
1203 if (!chan)
1204 return -ENOMEM;
1205 dev->controlvm_channel = chan;
1206 return 0;
1207}
1208
/*
 * setup_crash_devices_work_queue() - recreate devices after a kdump boot
 * @work: the scheduled work item (unused)
 *
 * Sends a synthetic CHIPSET_INIT, then reads the bus and device create
 * messages preserved in the channel's crash area (by save_crash_message())
 * and replays them to recreate the crash-dump devices.
 *
 * NOTE(review): msg is stack-allocated and only id/bus_count/switch_count
 * are set before chipset_init() reads msg.hdr.flags — the flags field is
 * uninitialized; confirm this is intentional.
 */
static void setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* Send a synthetic CHIPSET_INIT to bring the chipset up. */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;
	chipset_init(&msg);

	/* Validate the crash area layout before trusting its contents. */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      offsetof(struct visor_controlvm_channel,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read channel\n");
		return;
	}
	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		dev_err(&chipset_dev->acpi_device->dev, "invalid count\n");
		return;
	}

	if (visorchannel_read(chipset_dev->controlvm_channel,
			      offsetof(struct visor_controlvm_channel,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read channel\n");
		return;
	}

	/* First crash slot: the saved create_bus message. */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read channel\n");
		return;
	}

	/* Second crash slot: the saved create_device message. */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read channel\n");
		return;
	}

	/* Replay the saved messages: bus first, then the device. */
	if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
		dev_err(&chipset_dev->acpi_device->dev,
			"no valid create_bus message\n");
		return;
	}
	visorbus_create(&local_crash_bus_msg);

	if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
		dev_err(&chipset_dev->acpi_device->dev,
			"no valid create_device message\n");
		return;
	}
	visorbus_device_create(&local_crash_dev_msg);
}
1278
1279void visorbus_response(struct visor_device *bus_info, int response,
1280 int controlvm_id)
1281{
1282 if (!bus_info->pending_msg_hdr)
1283 return;
1284
1285 controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);
1286 kfree(bus_info->pending_msg_hdr);
1287 bus_info->pending_msg_hdr = NULL;
1288}
1289
1290void visorbus_device_changestate_response(struct visor_device *dev_info,
1291 int response,
1292 struct visor_segment_state state)
1293{
1294 if (!dev_info->pending_msg_hdr)
1295 return;
1296
1297 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE, dev_info,
1298 response, state);
1299 kfree(dev_info->pending_msg_hdr);
1300 dev_info->pending_msg_hdr = NULL;
1301}
1302
1303static void parser_done(struct parser_context *ctx)
1304{
1305 chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
1306 kfree(ctx);
1307}
1308
1309static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
1310 bool *retry)
1311{
1312 unsigned long allocbytes;
1313 struct parser_context *ctx;
1314 void *mapping;
1315
1316 *retry = false;
1317
1318 allocbytes = (unsigned long)bytes + 1 + (sizeof(struct parser_context) -
1319 sizeof(struct visor_controlvm_parameters_header));
1320 if ((chipset_dev->controlvm_payload_bytes_buffered + bytes) >
1321 MAX_CONTROLVM_PAYLOAD_BYTES) {
1322 *retry = true;
1323 return NULL;
1324 }
1325 ctx = kzalloc(allocbytes, GFP_KERNEL);
1326 if (!ctx) {
1327 *retry = true;
1328 return NULL;
1329 }
1330 ctx->allocbytes = allocbytes;
1331 ctx->param_bytes = bytes;
1332 mapping = memremap(addr, bytes, MEMREMAP_WB);
1333 if (!mapping)
1334 goto err_finish_ctx;
1335 memcpy(&ctx->data, mapping, bytes);
1336 memunmap(mapping);
1337 ctx->byte_stream = true;
1338 chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
1339 return ctx;
1340
1341err_finish_ctx:
1342 kfree(ctx);
1343 return NULL;
1344}
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
1360{
1361 struct controlvm_message_packet *cmd = &inmsg.cmd;
1362 u64 parm_addr;
1363 u32 parm_bytes;
1364 struct parser_context *parser_ctx = NULL;
1365 struct controlvm_message ackmsg;
1366 int err = 0;
1367
1368
1369 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1370 parm_bytes = inmsg.hdr.payload_bytes;
1371
1372
1373
1374
1375
1376 if (parm_bytes) {
1377 bool retry;
1378
1379 parser_ctx = parser_init_stream(parm_addr, parm_bytes, &retry);
1380 if (!parser_ctx && retry)
1381 return -EAGAIN;
1382 }
1383 controlvm_init_response(&ackmsg, &inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1384 err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
1385 CONTROLVM_QUEUE_ACK, &ackmsg);
1386 if (err)
1387 return err;
1388 switch (inmsg.hdr.id) {
1389 case CONTROLVM_CHIPSET_INIT:
1390 err = chipset_init(&inmsg);
1391 break;
1392 case CONTROLVM_BUS_CREATE:
1393 err = visorbus_create(&inmsg);
1394 break;
1395 case CONTROLVM_BUS_DESTROY:
1396 err = visorbus_destroy(&inmsg);
1397 break;
1398 case CONTROLVM_BUS_CONFIGURE:
1399 err = visorbus_configure(&inmsg, parser_ctx);
1400 break;
1401 case CONTROLVM_DEVICE_CREATE:
1402 err = visorbus_device_create(&inmsg);
1403 break;
1404 case CONTROLVM_DEVICE_CHANGESTATE:
1405 if (cmd->device_change_state.flags.phys_device) {
1406 err = parahotplug_process_message(&inmsg);
1407 } else {
1408
1409
1410
1411
1412 err = visorbus_device_changestate(&inmsg);
1413 break;
1414 }
1415 break;
1416 case CONTROLVM_DEVICE_DESTROY:
1417 err = visorbus_device_destroy(&inmsg);
1418 break;
1419 case CONTROLVM_DEVICE_CONFIGURE:
1420
1421 if (inmsg.hdr.flags.response_expected)
1422 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
1423 NULL);
1424 break;
1425 case CONTROLVM_CHIPSET_READY:
1426 err = chipset_ready_uevent(&inmsg.hdr);
1427 break;
1428 case CONTROLVM_CHIPSET_SELFTEST:
1429 err = chipset_selftest_uevent(&inmsg.hdr);
1430 break;
1431 case CONTROLVM_CHIPSET_STOP:
1432 err = chipset_notready_uevent(&inmsg.hdr);
1433 break;
1434 default:
1435 err = -ENOMSG;
1436 if (inmsg.hdr.flags.response_expected)
1437 controlvm_respond(&inmsg.hdr,
1438 -CONTROLVM_RESP_ID_UNKNOWN, NULL);
1439 break;
1440 }
1441 if (parser_ctx) {
1442 parser_done(parser_ctx);
1443 parser_ctx = NULL;
1444 }
1445 return err;
1446}
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456static int read_controlvm_event(struct controlvm_message *msg)
1457{
1458 int err = visorchannel_signalremove(chipset_dev->controlvm_channel,
1459 CONTROLVM_QUEUE_EVENT, msg);
1460
1461 if (err)
1462 return err;
1463
1464 if (msg->hdr.flags.test_message == 1)
1465 return -EINVAL;
1466 return 0;
1467}
1468
1469
1470
1471
1472
1473static void parahotplug_process_list(void)
1474{
1475 struct list_head *pos;
1476 struct list_head *tmp;
1477
1478 spin_lock(¶hotplug_request_list_lock);
1479 list_for_each_safe(pos, tmp, ¶hotplug_request_list) {
1480 struct parahotplug_request *req =
1481 list_entry(pos, struct parahotplug_request, list);
1482
1483 if (!time_after_eq(jiffies, req->expiration))
1484 continue;
1485 list_del(pos);
1486 if (req->msg.hdr.flags.response_expected)
1487 controlvm_respond(
1488 &req->msg.hdr,
1489 CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
1490 &req->msg.cmd.device_change_state.state);
1491 parahotplug_request_destroy(req);
1492 }
1493 spin_unlock(¶hotplug_request_list_lock);
1494}
1495
/*
 * controlvm_periodic_work() - periodic worker that drains the controlvm
 * channel queues and dispatches incoming messages
 * @work: embedded in chipset_dev->periodic_controlvm_work (unused directly)
 *
 * Always reschedules itself; the poll interval adapts to recent traffic.
 */
static void controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	int count = 0;
	int err;

	/* Drain the RESPONSE queue, bounded to CONTROLVM_MESSAGE_MAX. */
	do {
		err = visorchannel_signalremove(chipset_dev->controlvm_channel,
						CONTROLVM_QUEUE_RESPONSE,
						&inmsg);
	} while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));
	/* -EAGAIN means "queue now empty"; anything else aborts this pass. */
	if (err != -EAGAIN)
		goto schedule_out;
	if (chipset_dev->controlvm_pending_msg_valid) {
		/*
		 * We throttled processing of a prior message (handle_command
		 * returned -EAGAIN), so retry it before reading a new event.
		 */
		inmsg = chipset_dev->controlvm_pending_msg;
		chipset_dev->controlvm_pending_msg_valid = false;
		err = 0;
	} else {
		err = read_controlvm_event(&inmsg);
	}
	while (!err) {
		chipset_dev->most_recent_message_jiffies = jiffies;
		err = handle_command(inmsg,
				     visorchannel_get_physaddr
				     (chipset_dev->controlvm_channel));
		if (err == -EAGAIN) {
			/* Stash the message; retried on the next pass. */
			chipset_dev->controlvm_pending_msg = inmsg;
			chipset_dev->controlvm_pending_msg_valid = true;
			break;
		}

		/* reuse inmsg for the next event */
		err = read_controlvm_event(&inmsg);
	}

	/* expire stale parahotplug requests */
	parahotplug_process_list();

/*
 * The controlvm messages are sent in bulk. While receiving messages we want
 * fast polling; after MIN_IDLE_SECONDS with no message, slow it down.
 */
schedule_out:
	if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
		       (HZ * MIN_IDLE_SECONDS))) {
		/*
		 * It's been longer than MIN_IDLE_SECONDS since we processed
		 * our last controlvm message; slow down the polling.
		 */
		if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_SLOW)
			chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_SLOW;
	} else {
		if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_FAST)
			chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
	}
	schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
			      chipset_dev->poll_jiffies);
}
1558
1559static int visorchipset_init(struct acpi_device *acpi_device)
1560{
1561 int err = -ENODEV;
1562 struct visorchannel *controlvm_channel;
1563
1564 chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
1565 if (!chipset_dev)
1566 goto error;
1567 err = controlvm_channel_create(chipset_dev);
1568 if (err)
1569 goto error_free_chipset_dev;
1570 acpi_device->driver_data = chipset_dev;
1571 chipset_dev->acpi_device = acpi_device;
1572 chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
1573 err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
1574 visorchipset_dev_groups);
1575 if (err < 0)
1576 goto error_destroy_channel;
1577 controlvm_channel = chipset_dev->controlvm_channel;
1578 if (!visor_check_channel(visorchannel_get_header(controlvm_channel),
1579 &chipset_dev->acpi_device->dev,
1580 &visor_controlvm_channel_guid,
1581 "controlvm",
1582 sizeof(struct visor_controlvm_channel),
1583 VISOR_CONTROLVM_CHANNEL_VERSIONID,
1584 VISOR_CHANNEL_SIGNATURE))
1585 goto error_delete_groups;
1586
1587 if (is_kdump_kernel())
1588 INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
1589 setup_crash_devices_work_queue);
1590 else
1591 INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
1592 controlvm_periodic_work);
1593 chipset_dev->most_recent_message_jiffies = jiffies;
1594 chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
1595 schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
1596 chipset_dev->poll_jiffies);
1597 err = visorbus_init();
1598 if (err < 0)
1599 goto error_cancel_work;
1600 return 0;
1601
1602error_cancel_work:
1603 cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
1604
1605error_delete_groups:
1606 sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
1607 visorchipset_dev_groups);
1608
1609error_destroy_channel:
1610 visorchannel_destroy(chipset_dev->controlvm_channel);
1611
1612error_free_chipset_dev:
1613 kfree(chipset_dev);
1614
1615error:
1616 dev_err(&acpi_device->dev, "failed with error %d\n", err);
1617 return err;
1618}
1619
1620static int visorchipset_exit(struct acpi_device *acpi_device)
1621{
1622 visorbus_exit();
1623 cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
1624 sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
1625 visorchipset_dev_groups);
1626 visorchannel_destroy(chipset_dev->controlvm_channel);
1627 kfree(chipset_dev);
1628 return 0;
1629}
1630
/*
 * ACPI IDs this driver binds to.
 * NOTE(review): "PNP0A07" is a generic ACPI id — presumably the s-Par
 * firmware exposes it for the chipset device; confirm against firmware.
 */
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};
1635
/* ACPI driver glue: probe/remove callbacks for the s-Par chipset device. */
static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};
1646
/* Export the ID table so userspace can autoload this module. */
MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
1648
1649static __init int visorutil_spar_detect(void)
1650{
1651 unsigned int eax, ebx, ecx, edx;
1652
1653 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1654
1655 cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
1656 return (ebx == UNISYS_VISOR_ID_EBX) &&
1657 (ecx == UNISYS_VISOR_ID_ECX) &&
1658 (edx == UNISYS_VISOR_ID_EDX);
1659 }
1660 return 0;
1661}
1662
1663static int __init init_unisys(void)
1664{
1665 int result;
1666
1667 if (!visorutil_spar_detect())
1668 return -ENODEV;
1669 result = acpi_bus_register_driver(&unisys_acpi_driver);
1670 if (result)
1671 return -ENODEV;
1672 pr_info("Unisys Visorchipset Driver Loaded.\n");
1673 return 0;
1674};
1675
/* Module teardown: unregister the ACPI driver registered in init_unisys(). */
static void __exit exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}
1680
/* Standard module entry/exit wiring and metadata. */
module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");
1687