/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Intel MIC Host driver.
 *
 */
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/dmaengine.h>
#include <linux/mic_common.h>
#include "../common/mic_dev.h"
#include "mic_device.h"
#include "mic_smpt.h"
#include "mic_virtio.h"

/*
 * Size of the internal buffer used as an intermediate bounce buffer
 * for DMA copies to/from user space.
 */
#define MIC_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL)

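/*
 * Synchronously copy @len bytes from @src to @dst using the first MIC DMA
 * channel and wait for the transfer to complete. Returns -EBUSY when no DMA
 * channel is available, -ENOMEM if a descriptor cannot be prepared, or the
 * error reported by the DMA engine.
 */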
static int mic_sync_dma(struct mic_device *mdev, dma_addr_t dst,
			dma_addr_t src, size_t len)
{
	int err = 0;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *mic_ch = mdev->dma_ch[0];

	if (!mic_ch) {
		err = -EBUSY;
		goto error;
	}

	tx = mic_ch->device->device_prep_dma_memcpy(mic_ch, dst, src, len,
						    DMA_PREP_FENCE);
	if (!tx) {
		err = -ENOMEM;
		goto error;
	} else {
		dma_cookie_t cookie = tx->tx_submit(tx);

		err = dma_submit_error(cookie);
		if (err)
			goto error;
		err = dma_sync_wait(mic_ch, cookie);
	}
error:
	if (err)
		dev_err(&mdev->pdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
	return err;
}

/*
 * Initiate copies across the PCIe bus from card (MIC) memory to a user
 * space buffer, staging the data through the per-vring DMA bounce buffer.
 * DMA addresses and lengths are rounded to the alignment required by the
 * MIC DMA engine.
 */
static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, void __user *ubuf,
				   size_t len, u64 daddr, size_t dlen,
				   int vr_idx)
{
	struct mic_device *mdev = mvdev->mdev;
	void __iomem *dbuf = mdev->aper.va + daddr;
	struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
	size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align;
	size_t dma_offset;
	size_t partlen;
	int err;

	dma_offset = daddr - round_down(daddr, dma_alignment);
	daddr -= dma_offset;
	len += dma_offset;

	while (len) {
		partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);

		err = mic_sync_dma(mdev, mvr->buf_da, daddr,
				   ALIGN(partlen, dma_alignment));
		if (err)
			goto err;

		if (copy_to_user(ubuf, mvr->buf + dma_offset,
				 partlen - dma_offset)) {
			err = -EFAULT;
			goto err;
		}
		daddr += partlen;
		ubuf += partlen;
		dbuf += partlen;
		mvdev->in_bytes_dma += partlen;
		mvdev->in_bytes += partlen;
		len -= partlen;
		dma_offset = 0;
	}
	return 0;
err:
	dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
	return err;
}

/*
 * Initiate copies across the PCIe bus from a user space buffer to card
 * (MIC) memory. DMA is used when the destination address and length meet
 * the alignment requirements of the MIC DMA engine; otherwise the copy
 * falls back to a CPU copy into the device aperture.
 */
static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, void __user *ubuf,
				     size_t len, u64 daddr, size_t dlen,
				     int vr_idx)
{
	struct mic_device *mdev = mvdev->mdev;
	void __iomem *dbuf = mdev->aper.va + daddr;
	struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
	size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align;
	size_t partlen;
	int err;

	if (daddr & (dma_alignment - 1)) {
		mvdev->tx_dst_unaligned += len;
		goto memcpy;
	} else if (ALIGN(len, dma_alignment) > dlen) {
		mvdev->tx_len_unaligned += len;
		goto memcpy;
	}

	while (len) {
		partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);

		if (copy_from_user(mvr->buf, ubuf, partlen)) {
			err = -EFAULT;
			goto err;
		}
		err = mic_sync_dma(mdev, daddr, mvr->buf_da,
				   ALIGN(partlen, dma_alignment));
		if (err)
			goto err;
		daddr += partlen;
		ubuf += partlen;
		dbuf += partlen;
		mvdev->out_bytes_dma += partlen;
		mvdev->out_bytes += partlen;
		len -= partlen;
	}
memcpy:
	/*
	 * We are copying to IO below and should ideally use something
	 * like copy_from_user_toio(..) if it existed.
	 */
	if (copy_from_user((void __force *)dbuf, ubuf, len)) {
		err = -EFAULT;
		goto err;
	}
	mvdev->out_bytes += len;
	return 0;
err:
	dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
	return err;
}

#define MIC_VRINGH_READ true

/* Notify the card via its doorbell when the host has updated the vring */
static void mic_notify(struct vringh *vrh)
{
	struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh);
	struct mic_vdev *mvdev = mvrh->mvdev;
	s8 db = mvdev->dc->h2c_vdev_db;

	if (db != -1)
		mvdev->mdev->ops->send_intr(mvdev->mdev, db);
}

/* Determine the total number of bytes consumed in a vringh kiov */
static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov)
{
	int i;
	u32 total = iov->consumed;

	for (i = 0; i < iov->i; i++)
		total += iov->iov[i].iov_len;
	return total;
}

/*
 * Walk the elements of a vring kiov and copy the data to or from the user
 * space buffer one element at a time, using the DMA helpers above. Fully
 * consumed kiov elements are restored so that the used ring update can
 * report the correct totals.
 */
static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
			   void __user *ubuf, size_t len, bool read, int vr_idx,
			   size_t *out_len)
{
	int ret = 0;
	size_t partlen, tot_len = 0;

	while (len && iov->i < iov->used) {
		partlen = min(iov->iov[iov->i].iov_len, len);
		if (read)
			ret = mic_virtio_copy_to_user(mvdev, ubuf, partlen,
						(u64)iov->iov[iov->i].iov_base,
						iov->iov[iov->i].iov_len,
						vr_idx);
		else
			ret = mic_virtio_copy_from_user(mvdev, ubuf, partlen,
						(u64)iov->iov[iov->i].iov_base,
						iov->iov[iov->i].iov_len,
						vr_idx);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= partlen;
		ubuf += partlen;
		tot_len += partlen;
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;
		if (!iov->iov[iov->i].iov_len) {
			/* Restore the fully consumed element */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;
			/* Move on to the next kiov element */
			iov->consumed = 0;
			iov->i++;
		}
	}
	*out_len = tot_len;
	return ret;
}

/*
 * Fetch descriptors from the vring using the vringh infrastructure, copy
 * the data to/from the user space iovecs and optionally update the used
 * ring.
 */
static int _mic_virtio_copy(struct mic_vdev *mvdev,
			    struct mic_copy_desc *copy)
{
	int ret = 0;
	u32 iovcnt = copy->iovcnt;
	struct iovec iov;
	struct iovec __user *u_iov = copy->iov;
	void __user *ubuf = NULL;
	struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
	struct vringh_kiov *riov = &mvr->riov;
	struct vringh_kiov *wiov = &mvr->wiov;
	struct vringh *vrh = &mvr->vrh;
	u16 *head = &mvr->head;
	struct mic_vring *vr = &mvr->vring;
	size_t len = 0, out_len;

	copy->out_len = 0;
	/* Fetch a new descriptor chain only if the previous one is consumed */
	if (riov->i == riov->used && wiov->i == wiov->used) {
		ret = vringh_getdesc_kern(vrh, riov, wiov,
					  head, GFP_KERNEL);
		/* Check if there are available descriptors */
		if (ret <= 0)
			return ret;
	}
	while (iovcnt) {
		if (!len) {
			/* Copy over a new iovec from user space */
			ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
			if (ret) {
				ret = -EINVAL;
				dev_err(mic_dev(mvdev), "%s %d err %d\n",
					__func__, __LINE__, ret);
				break;
			}
			len = iov.iov_len;
			ubuf = iov.iov_base;
		}
		/* Issue all the read descriptors first */
		ret = mic_vringh_copy(mvdev, riov, ubuf, len, MIC_VRINGH_READ,
				      copy->vr_idx, &out_len);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		/* Issue the write descriptors next */
		ret = mic_vringh_copy(mvdev, wiov, ubuf, len, !MIC_VRINGH_READ,
				      copy->vr_idx, &out_len);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		if (!len) {
			/* One user space iovec is now completed */
			iovcnt--;
			u_iov++;
		}
		/* Exit the loop once all vring kiov elements are processed */
		if (riov->i == riov->used && wiov->i == wiov->used)
			break;
	}
	/*
	 * Update the used ring if a descriptor was available, some data was
	 * copied in/out and the caller asked for a used ring update.
	 */
	if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
		u32 total = 0;

		/* Determine the total data consumed */
		total += mic_vringh_iov_consumed(riov);
		total += mic_vringh_iov_consumed(wiov);
		vringh_complete_kern(vrh, *head, total);
		*head = USHRT_MAX;
		if (vringh_need_notify_kern(vrh) > 0)
			vringh_notify(vrh);
		vringh_kiov_cleanup(riov);
		vringh_kiov_cleanup(wiov);
		/* Update the avail index visible to user space */
		vr->info->avail_idx = vrh->last_avail_idx;
	}
	return ret;
}

static inline int mic_verify_copy_args(struct mic_vdev *mvdev,
				       struct mic_copy_desc *copy)
{
	if (copy->vr_idx >= mvdev->dd->num_vq) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EINVAL);
		return -EINVAL;
	}
	return 0;
}

/* Copy a chain of virtio descriptors to/from user space buffers */
int mic_virtio_copy_desc(struct mic_vdev *mvdev,
			 struct mic_copy_desc *copy)
{
	int err;
	struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];

	err = mic_verify_copy_args(mvdev, copy);
	if (err)
		return err;

	mutex_lock(&mvr->vr_mutex);
	if (!mic_vdevup(mvdev)) {
		err = -ENODEV;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto err;
	}
	err = _mic_virtio_copy(mvdev, copy);
	if (err) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, err);
	}
err:
	mutex_unlock(&mvr->vr_mutex);
	return err;
}

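/*
 * Once the card has published the used ring addresses, point each host side
 * vringh at the used ring within the device aperture and clear the
 * used_address_updated flag.
 */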
static void mic_virtio_init_post(struct mic_vdev *mvdev)
{
	struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd);
	int i;

	for (i = 0; i < mvdev->dd->num_vq; i++) {
		if (!le64_to_cpu(vqconfig[i].used_address)) {
			dev_warn(mic_dev(mvdev), "used_address zero??\n");
			continue;
		}
		mvdev->mvr[i].vrh.vring.used =
			(void __force *)mvdev->mdev->aper.va +
			le64_to_cpu(vqconfig[i].used_address);
	}

	mvdev->dc->used_address_updated = 0;

	dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n",
		__func__, mvdev->virtio_id);
}

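/*
 * Reset a virtio device: quiesce all vrings by taking their mutexes, clear
 * the device status and reset request, ack the reset to the card and zero
 * the host side vringh state.
 */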
static inline void mic_virtio_device_reset(struct mic_vdev *mvdev)
{
	int i;

	dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n",
		__func__, mvdev->dd->status, mvdev->virtio_id);

	for (i = 0; i < mvdev->dd->num_vq; i++)
		/*
		 * Take every vring mutex so no copies are in flight while
		 * the rings are reset. A distinct lockdep subclass (i + 1)
		 * per vring avoids false positives when the mutexes are
		 * held together with the mic mutex.
		 */
		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);

	/* 0 status means "reset" */
	mvdev->dd->status = 0;
	mvdev->dc->vdev_reset = 0;
	mvdev->dc->host_ack = 1;

	for (i = 0; i < mvdev->dd->num_vq; i++) {
		struct vringh *vrh = &mvdev->mvr[i].vrh;
		mvdev->mvr[i].vring.info->avail_idx = 0;
		vrh->completed = 0;
		vrh->last_avail_idx = 0;
		vrh->last_used_idx = 0;
	}

	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_unlock(&mvdev->mvr[i].vr_mutex);
}

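/* Reset all virtio devices on this MIC device and wake up any pollers */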
void mic_virtio_reset_devices(struct mic_device *mdev)
{
	struct list_head *pos, *tmp;
	struct mic_vdev *mvdev;

	dev_dbg(&mdev->pdev->dev, "%s\n", __func__);

	list_for_each_safe(pos, tmp, &mdev->vdev_list) {
		mvdev = list_entry(pos, struct mic_vdev, list);
		mic_virtio_device_reset(mvdev);
		mvdev->poll_wake = 1;
		wake_up(&mvdev->waitq);
	}
}

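/*
 * Bottom half for the virtio doorbell interrupt: complete device
 * initialization when the card has updated the used ring addresses, handle
 * reset requests and wake up pollers.
 */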
void mic_bh_handler(struct work_struct *work)
{
	struct mic_vdev *mvdev = container_of(work, struct mic_vdev,
					      virtio_bh_work);

	if (mvdev->dc->used_address_updated)
		mic_virtio_init_post(mvdev);

	if (mvdev->dc->vdev_reset)
		mic_virtio_device_reset(mvdev);

	mvdev->poll_wake = 1;
	wake_up(&mvdev->waitq);
}

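/*
 * Doorbell interrupt handler: apply any hardware interrupt workarounds and
 * defer the real work to the bottom half.
 */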
static irqreturn_t mic_virtio_intr_handler(int irq, void *data)
{
	struct mic_vdev *mvdev = data;
	struct mic_device *mdev = mvdev->mdev;

	mdev->ops->intr_workarounds(mdev);
	schedule_work(&mvdev->virtio_bh_work);
	return IRQ_HANDLED;
}

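/*
 * Copy a new virtio config space from user space into the device page,
 * notify the card of the config change and then wait, with a bounded number
 * of retries, for the card to ack it.
 */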
int mic_virtio_config_change(struct mic_vdev *mvdev,
			     void __user *argp)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int ret = 0, retry, i;
	struct mic_bootparam *bootparam = mvdev->mdev->dp;
	s8 db = bootparam->h2c_config_db;

	mutex_lock(&mvdev->mdev->mic_mutex);
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);

	if (db == -1 || mvdev->dd->type == -1) {
		ret = -EIO;
		goto exit;
	}

	if (copy_from_user(mic_vq_configspace(mvdev->dd),
			   argp, mvdev->dd->config_len)) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EFAULT);
		ret = -EFAULT;
		goto exit;
	}
	mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
	mvdev->mdev->ops->send_intr(mvdev->mdev, db);

	for (retry = 100; retry--;) {
		ret = wait_event_timeout(wake,
			mvdev->dc->guest_ack, msecs_to_jiffies(100));
		if (ret)
			break;
	}

	dev_dbg(mic_dev(mvdev),
		"%s %d retry: %d\n", __func__, __LINE__, retry);
	mvdev->dc->config_change = 0;
	mvdev->dc->guest_ack = 0;
exit:
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_unlock(&mvdev->mvr[i].vr_mutex);
	mutex_unlock(&mvdev->mdev->mic_mutex);
	return ret;
}

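/*
 * Copy a device descriptor provided by user space into the first free slot
 * of the device page, after validating its size, number of vrings and vring
 * entries. The descriptor type is returned separately and only written to
 * the device page once setup completes.
 */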
static int mic_copy_dp_entry(struct mic_vdev *mvdev,
			     void __user *argp,
			     __u8 *type,
			     struct mic_device_desc **devpage)
{
	struct mic_device *mdev = mvdev->mdev;
	struct mic_device_desc dd, *dd_config, *devp;
	struct mic_vqconfig *vqconfig;
	int ret = 0, i;
	bool slot_found = false;

	if (copy_from_user(&dd, argp, sizeof(dd))) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EFAULT);
		return -EFAULT;
	}

	if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
	    dd.num_vq > MIC_MAX_VRINGS) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EINVAL);
		return -EINVAL;
	}

	dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL);
	if (dd_config == NULL) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -ENOMEM);
		return -ENOMEM;
	}
	if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
		ret = -EFAULT;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}

	vqconfig = mic_vq_config(dd_config);
	for (i = 0; i < dd.num_vq; i++) {
		if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
			ret = -EINVAL;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto exit;
		}
	}

	/* Find the first free device page entry */
	for (i = sizeof(struct mic_bootparam);
	     i < MIC_DP_SIZE - mic_total_desc_size(dd_config);
	     i += mic_total_desc_size(devp)) {
		devp = mdev->dp + i;
		if (devp->type == 0 || devp->type == -1) {
			slot_found = true;
			break;
		}
	}
	if (!slot_found) {
		ret = -EINVAL;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}
	/*
	 * Save off the type before copying the descriptor. The type is set
	 * at the very end, after all initialization for the new device has
	 * completed.
	 */
	*type = dd_config->type;
	dd_config->type = 0;
	memcpy(devp, dd_config, mic_desc_size(dd_config));

	*devpage = devp;
exit:
	kfree(dd_config);
	return ret;
}

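/* Initialize the device control section that follows the device descriptor */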
static void mic_init_device_ctrl(struct mic_vdev *mvdev,
				 struct mic_device_desc *devpage)
{
	struct mic_device_ctrl *dc;

	dc = (void *)devpage + mic_aligned_desc_size(devpage);

	dc->config_change = 0;
	dc->guest_ack = 0;
	dc->vdev_reset = 0;
	dc->host_ack = 0;
	dc->used_address_updated = 0;
	dc->c2h_vdev_db = -1;
	dc->h2c_vdev_db = -1;
	mvdev->dc = dc;
}

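/*
 * Add a new virtio device: copy the descriptor into the device page,
 * allocate and map the vrings and DMA bounce buffers, set up the host side
 * vringh instances, request the doorbell interrupt and finally publish the
 * device to the card by setting its type.
 */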
int mic_virtio_add_device(struct mic_vdev *mvdev,
			  void __user *argp)
{
	struct mic_device *mdev = mvdev->mdev;
	struct mic_device_desc *dd = NULL;
	struct mic_vqconfig *vqconfig;
	int vr_size, i, j, ret;
	u8 type = 0;
	s8 db;
	char irqname[10];
	struct mic_bootparam *bootparam = mdev->dp;
	u16 num;
	dma_addr_t vr_addr;

	mutex_lock(&mdev->mic_mutex);

	ret = mic_copy_dp_entry(mvdev, argp, &type, &dd);
	if (ret) {
		mutex_unlock(&mdev->mic_mutex);
		return ret;
	}

	mic_init_device_ctrl(mvdev, dd);

	mvdev->dd = dd;
	mvdev->virtio_id = type;
	vqconfig = mic_vq_config(dd);
	INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler);

	for (i = 0; i < dd->num_vq; i++) {
		struct mic_vringh *mvr = &mvdev->mvr[i];
		struct mic_vring *vr = &mvdev->mvr[i].vring;
		num = le16_to_cpu(vqconfig[i].num);
		mutex_init(&mvr->vr_mutex);
		vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
				     sizeof(struct _mic_vring_info));
		vr->va = (void *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(vr_size));
		if (!vr->va) {
			ret = -ENOMEM;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vr->len = vr_size;
		vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
		vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i);
		vr_addr = mic_map_single(mdev, vr->va, vr_size);
		if (mic_map_error(vr_addr)) {
			free_pages((unsigned long)vr->va, get_order(vr_size));
			ret = -ENOMEM;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vqconfig[i].address = cpu_to_le64(vr_addr);

		vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
		ret = vringh_init_kern(&mvr->vrh,
				       *(u32 *)mic_vq_features(mvdev->dd),
				       num, false,
				       vr->vr.desc, vr->vr.avail, vr->vr.used);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vringh_kiov_init(&mvr->riov, NULL, 0);
		vringh_kiov_init(&mvr->wiov, NULL, 0);
		mvr->head = USHRT_MAX;
		mvr->mvdev = mvdev;
		mvr->vrh.notify = mic_notify;
		dev_dbg(&mdev->pdev->dev,
			"%s %d index %d va %p info %p vr_size 0x%x\n",
			__func__, __LINE__, i, vr->va, vr->info, vr_size);
		mvr->buf = (void *)__get_free_pages(GFP_KERNEL,
					get_order(MIC_INT_DMA_BUF_SIZE));
		mvr->buf_da = mic_map_single(mvdev->mdev, mvr->buf,
					     MIC_INT_DMA_BUF_SIZE);
	}

	snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id,
		 mvdev->virtio_id);
	mvdev->virtio_db = mic_next_db(mdev);
	mvdev->virtio_cookie = mic_request_threaded_irq(mdev,
					mic_virtio_intr_handler,
					NULL, irqname, mvdev,
					mvdev->virtio_db, MIC_INTR_DB);
	if (IS_ERR(mvdev->virtio_cookie)) {
		ret = PTR_ERR(mvdev->virtio_cookie);
		dev_dbg(&mdev->pdev->dev, "request irq failed\n");
		goto err;
	}

	mvdev->dc->c2h_vdev_db = mvdev->virtio_db;

	list_add_tail(&mvdev->list, &mdev->vdev_list);
	/*
	 * Order the device page updates above with the write to dd->type
	 * below. A non-zero type makes the new device descriptor visible
	 * to the card, so it must not be observable before the rest of the
	 * descriptor is.
	 */
	smp_wmb();
	dd->type = type;

	dev_dbg(&mdev->pdev->dev, "Added virtio device id %d\n", dd->type);

	db = bootparam->h2c_config_db;
	if (db != -1)
		mdev->ops->send_intr(mdev, db);
	mutex_unlock(&mdev->mic_mutex);
	return 0;
err:
	vqconfig = mic_vq_config(dd);
	for (j = 0; j < i; j++) {
		struct mic_vringh *mvr = &mvdev->mvr[j];
		mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address),
				 mvr->vring.len);
		free_pages((unsigned long)mvr->vring.va,
			   get_order(mvr->vring.len));
	}
	mutex_unlock(&mdev->mic_mutex);
	return ret;
}

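/*
 * Remove a virtio device: request a hot remove from the card (if a config
 * doorbell is available), free the interrupt, vrings and DMA buffers,
 * unlink the device and mark its device page slot as free.
 */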
void mic_virtio_del_device(struct mic_vdev *mvdev)
{
	struct list_head *pos, *tmp;
	struct mic_vdev *tmp_mvdev;
	struct mic_device *mdev = mvdev->mdev;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int i, ret, retry;
	struct mic_vqconfig *vqconfig;
	struct mic_bootparam *bootparam = mdev->dp;
	s8 db;

	mutex_lock(&mdev->mic_mutex);
	db = bootparam->h2c_config_db;
	if (db == -1)
		goto skip_hot_remove;
	dev_dbg(&mdev->pdev->dev,
		"Requesting hot remove id %d\n", mvdev->virtio_id);
	mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
	mdev->ops->send_intr(mdev, db);
	for (retry = 100; retry--;) {
		ret = wait_event_timeout(wake,
			mvdev->dc->guest_ack, msecs_to_jiffies(100));
		if (ret)
			break;
	}
	dev_dbg(&mdev->pdev->dev,
		"Device id %d config_change %d guest_ack %d retry %d\n",
		mvdev->virtio_id, mvdev->dc->config_change,
		mvdev->dc->guest_ack, retry);
	mvdev->dc->config_change = 0;
	mvdev->dc->guest_ack = 0;
skip_hot_remove:
	mic_free_irq(mdev, mvdev->virtio_cookie, mvdev);
	flush_work(&mvdev->virtio_bh_work);
	vqconfig = mic_vq_config(mvdev->dd);
	for (i = 0; i < mvdev->dd->num_vq; i++) {
		struct mic_vringh *mvr = &mvdev->mvr[i];

		mic_unmap_single(mvdev->mdev, mvr->buf_da,
				 MIC_INT_DMA_BUF_SIZE);
		free_pages((unsigned long)mvr->buf,
			   get_order(MIC_INT_DMA_BUF_SIZE));
		vringh_kiov_cleanup(&mvr->riov);
		vringh_kiov_cleanup(&mvr->wiov);
		mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address),
				 mvr->vring.len);
		free_pages((unsigned long)mvr->vring.va,
			   get_order(mvr->vring.len));
	}

	list_for_each_safe(pos, tmp, &mdev->vdev_list) {
		tmp_mvdev = list_entry(pos, struct mic_vdev, list);
		if (tmp_mvdev == mvdev) {
			list_del(pos);
			dev_dbg(&mdev->pdev->dev,
				"Removing virtio device id %d\n",
				mvdev->virtio_id);
			break;
		}
	}
	/*
	 * Order the cleanup above with the write to dd->type below. Setting
	 * the type to -1 marks the device page slot as free and must not
	 * become visible before the preceding updates.
	 */
	smp_wmb();
	mvdev->dd->type = -1;
	mutex_unlock(&mdev->mic_mutex);
}
