1
2
3
4
5
6
7
8
9
10
11
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/kthread.h>
15#include <linux/mm.h>
16#include <linux/slab.h>
17
18#include <asm/acrn.h>
19
20#include "acrn_drv.h"
21
/* Pause/resume request dispatching; used while a client is torn down. */
static void ioreq_pause(void);
static void ioreq_resume(void);

static void ioreq_dispatcher(struct work_struct *work);
/* Ordered (max_active == 1) workqueue on which ioreq_work runs. */
static struct workqueue_struct *ioreq_wq;
static DECLARE_WORK(ioreq_work, ioreq_dispatcher);
28
29static inline bool has_pending_request(struct acrn_ioreq_client *client)
30{
31 return !bitmap_empty(client->ioreqs_map, ACRN_IO_REQUEST_MAX);
32}
33
34static inline bool is_destroying(struct acrn_ioreq_client *client)
35{
36 return test_bit(ACRN_IOREQ_CLIENT_DESTROYING, &client->flags);
37}
38
/*
 * Mark an I/O request COMPLETE and, unless completion polling is in use,
 * notify the hypervisor that the request of @vcpu on @vm is finished.
 *
 * Return: 0 on success, or the negative hypercall error when the finish
 * notification fails.
 */
static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu,
				  struct acrn_io_request *acrn_req)
{
	bool polling_mode;
	int ret = 0;

	/*
	 * Snapshot the polling flag before publishing COMPLETE: once the
	 * state is COMPLETE the hypervisor side may reuse the slot, so the
	 * request must not be read afterwards.
	 */
	polling_mode = acrn_req->completion_polling;

	/*
	 * smp_store_release() orders all prior writes to the request buffer
	 * before the state change becomes visible to the hypervisor/reader.
	 */
	smp_store_release(&acrn_req->processed, ACRN_IOREQ_STATE_COMPLETE);

	/*
	 * In polling mode the hypervisor observes the COMPLETE state itself,
	 * so the (more expensive) notification hypercall is skipped.
	 */
	if (!polling_mode) {
		ret = hcall_notify_req_finish(vm->vmid, vcpu);
		if (ret < 0)
			dev_err(acrn_dev.this_device,
				"Notify I/O request finished failed!\n");
	}

	return ret;
}
72
73static int acrn_ioreq_complete_request(struct acrn_ioreq_client *client,
74 u16 vcpu,
75 struct acrn_io_request *acrn_req)
76{
77 int ret;
78
79 if (vcpu >= client->vm->vcpu_num)
80 return -EINVAL;
81
82 clear_bit(vcpu, client->ioreqs_map);
83 if (!acrn_req) {
84 acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf;
85 acrn_req += vcpu;
86 }
87
88 ret = ioreq_complete_request(client->vm, vcpu, acrn_req);
89
90 return ret;
91}
92
93int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu)
94{
95 int ret = 0;
96
97 spin_lock_bh(&vm->ioreq_clients_lock);
98 if (vm->default_client)
99 ret = acrn_ioreq_complete_request(vm->default_client,
100 vcpu, NULL);
101 spin_unlock_bh(&vm->ioreq_clients_lock);
102
103 return ret;
104}
105
106
107
108
109
110
111
112
113
114
115int acrn_ioreq_range_add(struct acrn_ioreq_client *client,
116 u32 type, u64 start, u64 end)
117{
118 struct acrn_ioreq_range *range;
119
120 if (end < start) {
121 dev_err(acrn_dev.this_device,
122 "Invalid IO range [0x%llx,0x%llx]\n", start, end);
123 return -EINVAL;
124 }
125
126 range = kzalloc(sizeof(*range), GFP_KERNEL);
127 if (!range)
128 return -ENOMEM;
129
130 range->type = type;
131 range->start = start;
132 range->end = end;
133
134 write_lock_bh(&client->range_lock);
135 list_add(&range->list, &client->range_list);
136 write_unlock_bh(&client->range_lock);
137
138 return 0;
139}
140
141
142
143
144
145
146
147
148void acrn_ioreq_range_del(struct acrn_ioreq_client *client,
149 u32 type, u64 start, u64 end)
150{
151 struct acrn_ioreq_range *range;
152
153 write_lock_bh(&client->range_lock);
154 list_for_each_entry(range, &client->range_list, list) {
155 if (type == range->type &&
156 start == range->start &&
157 end == range->end) {
158 list_del(&range->list);
159 kfree(range);
160 break;
161 }
162 }
163 write_unlock_bh(&client->range_lock);
164}
165
166
167
168
169
/*
 * Kthread body for a non-default ioreq client: sleep until requests are
 * pending (or the thread is stopped), then feed each pending request to
 * the client's handler and complete it.
 */
static int ioreq_task(void *data)
{
	struct acrn_ioreq_client *client = data;
	struct acrn_io_request *req;
	unsigned long *ioreqs_map;
	int vcpu, ret;

	/* Each set bit is a vCPU with an outstanding I/O request. */
	ioreqs_map = client->ioreqs_map;
	while (!kthread_should_stop()) {
		acrn_ioreq_client_wait(client);
		while (has_pending_request(client)) {
			vcpu = find_first_bit(ioreqs_map, client->vm->vcpu_num);
			req = client->vm->ioreq_buf->req_slot + vcpu;
			ret = client->handler(client, req);
			if (ret < 0) {
				/* Leave the request pending; retried on the
				 * next wakeup of this thread. */
				dev_err(acrn_dev.this_device,
					"IO handle failure: %d\n", ret);
				break;
			}
			/* Clears the vcpu bit and notifies the hypervisor. */
			acrn_ioreq_complete_request(client, vcpu, req);
		}
	}

	return 0;
}
205
206
207
208
209
210
211
/*
 * Clear all in-flight I/O requests of @vm: wait for the registered
 * clients to drain their pending requests, then forcibly complete
 * whatever the default client still has outstanding.
 */
void acrn_ioreq_request_clear(struct acrn_vm *vm)
{
	struct acrn_ioreq_client *client;
	bool has_pending = false;
	unsigned long vcpu;
	int retry = 10;

	/* While this flag is set the dispatcher completes new requests
	 * immediately instead of routing them to clients. */
	set_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);

	/* Poll (up to 10 times, ~10ms apart) for all clients to drain. */
	do {
		spin_lock_bh(&vm->ioreq_clients_lock);
		list_for_each_entry(client, &vm->ioreq_clients, list) {
			has_pending = has_pending_request(client);
			if (has_pending)
				break;
		}
		spin_unlock_bh(&vm->ioreq_clients_lock);

		if (has_pending)
			schedule_timeout_interruptible(HZ / 100);
	} while (has_pending && --retry > 0);
	/* retry can only hit 0 after a break above, so client still points
	 * at the client that failed to drain. */
	if (retry == 0)
		dev_warn(acrn_dev.this_device,
			 "%s cannot flush pending request!\n", client->name);

	/* Forcibly complete every request still pending on the default
	 * client, walking its bitmap bit by bit. */
	spin_lock_bh(&vm->ioreq_clients_lock);
	client = vm->default_client;
	if (client) {
		vcpu = find_next_bit(client->ioreqs_map,
				     ACRN_IO_REQUEST_MAX, 0);
		while (vcpu < ACRN_IO_REQUEST_MAX) {
			acrn_ioreq_complete_request(client, vcpu, NULL);
			vcpu = find_next_bit(client->ioreqs_map,
					     ACRN_IO_REQUEST_MAX, vcpu + 1);
		}
	}
	spin_unlock_bh(&vm->ioreq_clients_lock);

	/* Resume normal dispatching. */
	clear_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);
}
262
263int acrn_ioreq_client_wait(struct acrn_ioreq_client *client)
264{
265 if (client->is_default) {
266
267
268
269
270
271 wait_event_interruptible(client->wq,
272 has_pending_request(client) ||
273 is_destroying(client));
274 if (is_destroying(client))
275 return -ENODEV;
276 } else {
277 wait_event_interruptible(client->wq,
278 has_pending_request(client) ||
279 kthread_should_stop());
280 }
281
282 return 0;
283}
284
285static bool is_cfg_addr(struct acrn_io_request *req)
286{
287 return ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
288 (req->reqs.pio_request.address == 0xcf8));
289}
290
291static bool is_cfg_data(struct acrn_io_request *req)
292{
293 return ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
294 ((req->reqs.pio_request.address >= 0xcfc) &&
295 (req->reqs.pio_request.address < (0xcfc + 4))));
296}
297
298
/* Low 8 bits of the config register offset, dword-aligned (CF8 format). */
#define PCI_LOWREG_MASK 0xFC
/* Extended register bits (PCIe extended config space) of the offset. */
#define PCI_HIGHREG_MASK 0xF00
/* Highest PCI function number (3 bits). */
#define PCI_FUNCMAX 7
/* Highest PCI device/slot number (5 bits). */
#define PCI_SLOTMAX 31
/* Highest PCI bus number (8 bits). */
#define PCI_BUSMAX 255
/* Enable bit of the CF8 configuration address. */
#define CONF1_ENABLE 0x80000000UL
309
310
311
312
313
314
315
316
/*
 * In-kernel handling of legacy PCI configuration mechanism #1 ports.
 *
 * Accesses to 0xcf8 are latched into / read back from vm->pci_conf_addr
 * and completed here. Accesses to 0xcfc-0xcff are either completed here
 * (mechanism disabled: reads return all-ones) or rewritten into an
 * ACRN_IOREQ_TYPE_PCICFG request for a client to handle.
 *
 * Return: true when the request was fully handled (and completed) here.
 */
static bool handle_cf8cfc(struct acrn_vm *vm,
			  struct acrn_io_request *req, u16 vcpu)
{
	int offset, pci_cfg_addr, pci_reg;
	bool is_handled = false;

	if (is_cfg_addr(req)) {
		/* The config address port is defined as a 32-bit port. */
		WARN_ON(req->reqs.pio_request.size != 4);
		if (req->reqs.pio_request.direction == ACRN_IOREQ_DIR_WRITE)
			vm->pci_conf_addr = req->reqs.pio_request.value;
		else
			req->reqs.pio_request.value = vm->pci_conf_addr;
		is_handled = true;
	} else if (is_cfg_data(req)) {
		if (!(vm->pci_conf_addr & CONF1_ENABLE)) {
			/* Mechanism #1 disabled: reads float to all-ones,
			 * writes are silently dropped. */
			if (req->reqs.pio_request.direction ==
			    ACRN_IOREQ_DIR_READ)
				req->reqs.pio_request.value = 0xffffffff;
			is_handled = true;
		} else {
			/* Byte offset of the access within the dword. */
			offset = req->reqs.pio_request.address - 0xcfc;

			/* Decode CF8 into a PCI config-space request. */
			req->type = ACRN_IOREQ_TYPE_PCICFG;
			pci_cfg_addr = vm->pci_conf_addr;
			req->reqs.pci_request.bus =
					(pci_cfg_addr >> 16) & PCI_BUSMAX;
			req->reqs.pci_request.dev =
					(pci_cfg_addr >> 11) & PCI_SLOTMAX;
			req->reqs.pci_request.func =
					(pci_cfg_addr >> 8) & PCI_FUNCMAX;
			pci_reg = (pci_cfg_addr & PCI_LOWREG_MASK) +
				   ((pci_cfg_addr >> 16) & PCI_HIGHREG_MASK);
			req->reqs.pci_request.reg = pci_reg + offset;
		}
	}

	if (is_handled)
		ioreq_complete_request(vm, vcpu, req);

	return is_handled;
}
358
359static bool in_range(struct acrn_ioreq_range *range,
360 struct acrn_io_request *req)
361{
362 bool ret = false;
363
364 if (range->type == req->type) {
365 switch (req->type) {
366 case ACRN_IOREQ_TYPE_MMIO:
367 if (req->reqs.mmio_request.address >= range->start &&
368 (req->reqs.mmio_request.address +
369 req->reqs.mmio_request.size - 1) <= range->end)
370 ret = true;
371 break;
372 case ACRN_IOREQ_TYPE_PORTIO:
373 if (req->reqs.pio_request.address >= range->start &&
374 (req->reqs.pio_request.address +
375 req->reqs.pio_request.size - 1) <= range->end)
376 ret = true;
377 break;
378 default:
379 break;
380 }
381 }
382
383 return ret;
384}
385
386static struct acrn_ioreq_client *find_ioreq_client(struct acrn_vm *vm,
387 struct acrn_io_request *req)
388{
389 struct acrn_ioreq_client *client, *found = NULL;
390 struct acrn_ioreq_range *range;
391
392 lockdep_assert_held(&vm->ioreq_clients_lock);
393
394 list_for_each_entry(client, &vm->ioreq_clients, list) {
395 read_lock_bh(&client->range_lock);
396 list_for_each_entry(range, &client->range_list, list) {
397 if (in_range(range, req)) {
398 found = client;
399 break;
400 }
401 }
402 read_unlock_bh(&client->range_lock);
403 if (found)
404 break;
405 }
406 return found ? found : vm->default_client;
407}
408
409
410
411
412
413
414
415
416
417
418
419
420struct acrn_ioreq_client *acrn_ioreq_client_create(struct acrn_vm *vm,
421 ioreq_handler_t handler,
422 void *priv, bool is_default,
423 const char *name)
424{
425 struct acrn_ioreq_client *client;
426
427 if (!handler && !is_default) {
428 dev_dbg(acrn_dev.this_device,
429 "Cannot create non-default client w/o handler!\n");
430 return NULL;
431 }
432 client = kzalloc(sizeof(*client), GFP_KERNEL);
433 if (!client)
434 return NULL;
435
436 client->handler = handler;
437 client->vm = vm;
438 client->priv = priv;
439 client->is_default = is_default;
440 if (name)
441 strncpy(client->name, name, sizeof(client->name) - 1);
442 rwlock_init(&client->range_lock);
443 INIT_LIST_HEAD(&client->range_list);
444 init_waitqueue_head(&client->wq);
445
446 if (client->handler) {
447 client->thread = kthread_run(ioreq_task, client, "VM%u-%s",
448 client->vm->vmid, client->name);
449 if (IS_ERR(client->thread)) {
450 kfree(client);
451 return NULL;
452 }
453 }
454
455 spin_lock_bh(&vm->ioreq_clients_lock);
456 if (is_default)
457 vm->default_client = client;
458 else
459 list_add(&client->list, &vm->ioreq_clients);
460 spin_unlock_bh(&vm->ioreq_clients_lock);
461
462 dev_dbg(acrn_dev.this_device, "Created ioreq client %s.\n", name);
463 return client;
464}
465
466
467
468
469
/*
 * Tear down @client: pause request dispatching, stop its waiter/thread,
 * unlink it from the VM, free its ranges, then resume dispatching.
 */
void acrn_ioreq_client_destroy(struct acrn_ioreq_client *client)
{
	struct acrn_ioreq_range *range, *next;
	struct acrn_vm *vm = client->vm;

	dev_dbg(acrn_dev.this_device,
		"Destroy ioreq client %s.\n", client->name);
	/* Stop the dispatcher so it cannot touch this client concurrently. */
	ioreq_pause();
	set_bit(ACRN_IOREQ_CLIENT_DESTROYING, &client->flags);
	if (client->is_default)
		/* Waiters see the DESTROYING flag and return -ENODEV. */
		wake_up_interruptible(&client->wq);
	else
		kthread_stop(client->thread);

	/* Unlink so the dispatcher can never find it again. */
	spin_lock_bh(&vm->ioreq_clients_lock);
	if (client->is_default)
		vm->default_client = NULL;
	else
		list_del(&client->list);
	spin_unlock_bh(&vm->ioreq_clients_lock);

	/* Free all registered I/O ranges. */
	write_lock_bh(&client->range_lock);
	list_for_each_entry_safe(range, next, &client->range_list, list) {
		list_del(&range->list);
		kfree(range);
	}
	write_unlock_bh(&client->range_lock);
	kfree(client);

	ioreq_resume();
}
501
/*
 * Walk every vCPU slot of @vm's shared I/O request buffer and route each
 * PENDING request: complete it immediately while the VM is clearing
 * requests, handle PCI 0xcf8/0xcfc accesses in-kernel, otherwise mark it
 * PROCESSING and hand it to the owning client.
 *
 * Return: 0 on success, -EINVAL if no client (not even a default one)
 * can take a request.
 */
static int acrn_ioreq_dispatch(struct acrn_vm *vm)
{
	struct acrn_ioreq_client *client;
	struct acrn_io_request *req;
	int i;

	for (i = 0; i < vm->vcpu_num; i++) {
		req = vm->ioreq_buf->req_slot + i;

		/* smp_load_acquire() pairs with the hypervisor-side release
		 * of the request contents before the state became PENDING. */
		if (smp_load_acquire(&req->processed) ==
		    ACRN_IOREQ_STATE_PENDING) {
			/* VM is flushing: complete without dispatching. */
			if (test_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags)) {
				ioreq_complete_request(vm, i, req);
				continue;
			}
			/* PCI config mechanism #1 ports handled in-kernel. */
			if (handle_cf8cfc(vm, req, i))
				continue;

			spin_lock_bh(&vm->ioreq_clients_lock);
			client = find_ioreq_client(vm, req);
			if (!client) {
				dev_err(acrn_dev.this_device,
					"Failed to find ioreq client!\n");
				spin_unlock_bh(&vm->ioreq_clients_lock);
				return -EINVAL;
			}
			if (!client->is_default)
				req->kernel_handled = 1;
			else
				req->kernel_handled = 0;
			/* Publish PROCESSING before setting the client's
			 * pending bit so the handler never sees a stale
			 * state. */
			smp_store_release(&req->processed,
					  ACRN_IOREQ_STATE_PROCESSING);
			set_bit(i, client->ioreqs_map);
			wake_up_interruptible(&client->wq);
			spin_unlock_bh(&vm->ioreq_clients_lock);
		}
	}

	return 0;
}
548
/* Workqueue handler: dispatch pending I/O requests of every known VM. */
static void ioreq_dispatcher(struct work_struct *work)
{
	struct acrn_vm *vm;

	read_lock(&acrn_vm_list_lock);
	list_for_each_entry(vm, &acrn_vm_list, list) {
		/*
		 * NOTE(review): a VM without an ioreq buffer terminates the
		 * whole walk, skipping any VMs after it in the list —
		 * presumably new VMs are added at the head before their
		 * buffer is set up; confirm, else this should be `continue`.
		 */
		if (!vm->ioreq_buf)
			break;
		acrn_ioreq_dispatch(vm);
	}
	read_unlock(&acrn_vm_list_lock);
}
561
/* Hypervisor upcall: defer dispatching to the ioreq workqueue. */
static void ioreq_intr_handler(void)
{
	queue_work(ioreq_wq, &ioreq_work);
}
566
static void ioreq_pause(void)
{
	/* Unregister the upcall handler, then wait for any in-flight
	 * dispatch work to finish, so clients can be changed safely. */
	acrn_remove_intr_handler();
	drain_workqueue(ioreq_wq);
}
573
static void ioreq_resume(void)
{
	/* Re-register the upcall handler and kick one dispatch pass to pick
	 * up any requests that arrived while dispatching was paused. */
	acrn_setup_intr_handler(ioreq_intr_handler);
	queue_work(ioreq_wq, &ioreq_work);
}
580
581int acrn_ioreq_intr_setup(void)
582{
583 acrn_setup_intr_handler(ioreq_intr_handler);
584 ioreq_wq = alloc_workqueue("ioreq_wq",
585 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
586 if (!ioreq_wq) {
587 dev_err(acrn_dev.this_device, "Failed to alloc workqueue!\n");
588 acrn_remove_intr_handler();
589 return -ENOMEM;
590 }
591 return 0;
592}
593
594void acrn_ioreq_intr_remove(void)
595{
596 if (ioreq_wq)
597 destroy_workqueue(ioreq_wq);
598 acrn_remove_intr_handler();
599}
600
/*
 * Initialize @vm's I/O request buffer from the userspace page at
 * @buf_vma: pin the page long-term, map it as the shared request buffer
 * and tell the hypervisor its physical address.
 *
 * Return: 0 on success, -EEXIST if already initialized, -ENOMEM on
 * allocation failure, -EFAULT if the page cannot be pinned, or the
 * hypercall error.
 */
int acrn_ioreq_init(struct acrn_vm *vm, u64 buf_vma)
{
	struct acrn_ioreq_buffer *set_buffer;
	struct page *page;
	int ret;

	if (vm->ioreq_buf)
		return -EEXIST;

	set_buffer = kzalloc(sizeof(*set_buffer), GFP_KERNEL);
	if (!set_buffer)
		return -ENOMEM;

	/* Pin the single shared page for the lifetime of the VM. */
	ret = pin_user_pages_fast(buf_vma, 1,
				  FOLL_WRITE | FOLL_LONGTERM, &page);
	if (unlikely(ret != 1) || !page) {
		dev_err(acrn_dev.this_device, "Failed to pin ioreq page!\n");
		ret = -EFAULT;
		goto free_buf;
	}

	vm->ioreq_buf = page_address(page);
	vm->ioreq_page = page;
	set_buffer->ioreq_buf = page_to_phys(page);
	/* Hand the buffer's physical address to the hypervisor. */
	ret = hcall_set_ioreq_buffer(vm->vmid, virt_to_phys(set_buffer));
	if (ret < 0) {
		dev_err(acrn_dev.this_device, "Failed to init ioreq buffer!\n");
		unpin_user_page(page);
		vm->ioreq_buf = NULL;
		goto free_buf;
	}

	dev_dbg(acrn_dev.this_device,
		"Init ioreq buffer %pK!\n", vm->ioreq_buf);
	ret = 0;
free_buf:
	/* set_buffer is only needed for the hypercall itself. */
	kfree(set_buffer);
	return ret;
}
640
/*
 * Tear down @vm's I/O request handling: destroy every remaining client
 * (including the default one) and unpin the shared request buffer page.
 */
void acrn_ioreq_deinit(struct acrn_vm *vm)
{
	struct acrn_ioreq_client *client, *next;

	dev_dbg(acrn_dev.this_device,
		"Deinit ioreq buffer %pK!\n", vm->ioreq_buf);

	/* _safe variant: destroy unlinks and frees each client. */
	list_for_each_entry_safe(client, next, &vm->ioreq_clients, list)
		acrn_ioreq_client_destroy(client);
	if (vm->default_client)
		acrn_ioreq_client_destroy(vm->default_client);

	if (vm->ioreq_buf && vm->ioreq_page) {
		unpin_user_page(vm->ioreq_page);
		vm->ioreq_buf = NULL;
	}
}
658