1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include <linux/objtool.h>
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/slab.h>
31#include <linux/mem_encrypt.h>
32
33#include <asm/hypervisor.h>
34#include <drm/drm_ioctl.h>
35
36#include "vmwgfx_drv.h"
37#include "vmwgfx_msg_x86.h"
38#include "vmwgfx_msg_arm64.h"
39#include "vmwgfx_mksstat.h"
40
41#define MESSAGE_STATUS_SUCCESS 0x0001
42#define MESSAGE_STATUS_DORECV 0x0002
43#define MESSAGE_STATUS_CPT 0x0010
44#define MESSAGE_STATUS_HB 0x0080
45
46#define RPCI_PROTOCOL_NUM 0x49435052
47#define GUESTMSG_FLAG_COOKIE 0x80000000
48
49#define RETRIES 3
50
51#define VMW_HYPERVISOR_MAGIC 0x564D5868
52
53#define VMW_PORT_CMD_MSG 30
54#define VMW_PORT_CMD_HB_MSG 0
55#define VMW_PORT_CMD_OPEN_CHANNEL (MSG_TYPE_OPEN << 16 | VMW_PORT_CMD_MSG)
56#define VMW_PORT_CMD_CLOSE_CHANNEL (MSG_TYPE_CLOSE << 16 | VMW_PORT_CMD_MSG)
57#define VMW_PORT_CMD_SENDSIZE (MSG_TYPE_SENDSIZE << 16 | VMW_PORT_CMD_MSG)
58#define VMW_PORT_CMD_RECVSIZE (MSG_TYPE_RECVSIZE << 16 | VMW_PORT_CMD_MSG)
59#define VMW_PORT_CMD_RECVSTATUS (MSG_TYPE_RECVSTATUS << 16 | VMW_PORT_CMD_MSG)
60
61#define VMW_PORT_CMD_MKS_GUEST_STATS 85
62#define VMW_PORT_CMD_MKSGS_RESET (0 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
63#define VMW_PORT_CMD_MKSGS_ADD_PPN (1 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
64#define VMW_PORT_CMD_MKSGS_REMOVE_PPN (2 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
65
66#define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16)
67
68#define MAX_USER_MSG_LENGTH PAGE_SIZE
69
70static u32 vmw_msg_enabled = 1;
71
72enum rpc_msg_type {
73 MSG_TYPE_OPEN,
74 MSG_TYPE_SENDSIZE,
75 MSG_TYPE_SENDPAYLOAD,
76 MSG_TYPE_RECVSIZE,
77 MSG_TYPE_RECVPAYLOAD,
78 MSG_TYPE_RECVSTATUS,
79 MSG_TYPE_CLOSE,
80};
81
82struct rpc_channel {
83 u16 channel_id;
84 u32 cookie_high;
85 u32 cookie_low;
86};
87
88
89
90
91
92
93
94
95
96
97
/**
 * vmw_open_channel - Open an RPC channel to the host via the VMware
 * backdoor port.
 * @channel: RPC channel structure filled in on success.
 * @protocol: Protocol number to open (e.g. RPCI_PROTOCOL_NUM).
 *
 * Returns: 0 on success, -EINVAL if the hypervisor did not report success.
 */
static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
{
	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;

	/*
	 * Request a cookie-protected channel. On success the hypervisor
	 * returns the session cookie halves in si/di and the channel id
	 * in the high word of edx.
	 */
	VMW_PORT(VMW_PORT_CMD_OPEN_CHANNEL,
		(protocol | GUESTMSG_FLAG_COOKIE), si, di,
		0,
		VMW_HYPERVISOR_MAGIC,
		eax, ebx, ecx, edx, si, di);

	/* MESSAGE_STATUS_* flags come back in the high word of ecx. */
	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
		return -EINVAL;

	channel->channel_id = HIGH_WORD(edx);
	channel->cookie_high = si;
	channel->cookie_low = di;

	return 0;
}
117
118
119
120
121
122
123
124
125
126
/**
 * vmw_close_channel - Close an RPC channel previously opened with
 * vmw_open_channel().
 * @channel: RPC channel to close.
 *
 * Returns: 0 on success, -EINVAL if the hypervisor did not report success.
 */
static int vmw_close_channel(struct rpc_channel *channel)
{
	unsigned long eax, ebx, ecx, edx, si, di;

	/* The session cookie is passed back in si/di to authenticate us. */
	si = channel->cookie_high;
	di = channel->cookie_low;

	VMW_PORT(VMW_PORT_CMD_CLOSE_CHANNEL,
		0, si, di,
		channel->channel_id << 16,
		VMW_HYPERVISOR_MAGIC,
		eax, ebx, ecx, edx, si, di);

	/* Status flags are reported in the high word of ecx. */
	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
		return -EINVAL;

	return 0;
}
146
147
148
149
150
151
152
153
154
155
/**
 * vmw_port_hb_out - Send the message payload, using the high-bandwidth
 * port when offered and the word-at-a-time backdoor otherwise.
 * @channel: The open RPC channel.
 * @msg: NUL-terminated message payload to send.
 * @hb: Whether the host offered the high-bandwidth transfer port.
 *
 * Returns: the register (ebx on the HB path, ecx otherwise) whose high
 * word carries the MESSAGE_STATUS_* flags the caller inspects.
 */
static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
				     const char *msg, bool hb)
{
	unsigned long si, di, eax, ebx, ecx, edx;
	unsigned long msg_len = strlen(msg);

	/*
	 * HB port lets the host read the buffer directly, so it cannot be
	 * used when guest memory is encrypted (host would see ciphertext).
	 */
	if (hb && !mem_encrypt_active()) {
		unsigned long bp = channel->cookie_high;
		u32 channel_id = (channel->channel_id << 16);

		si = (uintptr_t) msg;
		di = channel->cookie_low;

		VMW_PORT_HB_OUT(
			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
			msg_len, si, di,
			VMWARE_HYPERVISOR_HB | channel_id |
			VMWARE_HYPERVISOR_OUT,
			VMW_HYPERVISOR_MAGIC, bp,
			eax, ebx, ecx, edx, si, di);

		/* On the HB path the status comes back in ebx. */
		return ebx;
	}

	/* Low-bandwidth fallback: push the payload 4 bytes per port call. */
	ecx = MESSAGE_STATUS_SUCCESS << 16;
	while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) {
		unsigned int bytes = min_t(size_t, msg_len, 4);
		unsigned long word = 0;

		/* memcpy avoids reading past the end of msg on the tail. */
		memcpy(&word, msg, bytes);
		msg_len -= bytes;
		msg += bytes;
		si = channel->cookie_high;
		di = channel->cookie_low;

		VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
			word, si, di,
			channel->channel_id << 16,
			VMW_HYPERVISOR_MAGIC,
			eax, ebx, ecx, edx, si, di);
	}

	return ecx;
}
202
203
204
205
206
207
208
209
210
211
212
/**
 * vmw_port_hb_in - Receive the message payload, using the high-bandwidth
 * port when offered and the word-at-a-time backdoor otherwise.
 * @channel: The open RPC channel.
 * @reply: Buffer to receive into; must hold at least @reply_len bytes.
 * @reply_len: Number of payload bytes to read.
 * @hb: Whether the host offered the high-bandwidth transfer port.
 *
 * Returns: the register (ebx on the HB path, ecx otherwise) whose high
 * word carries the MESSAGE_STATUS_* flags the caller inspects.
 */
static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
				    unsigned long reply_len, bool hb)
{
	unsigned long si, di, eax, ebx, ecx, edx;

	/*
	 * HB port has the host write into the buffer directly, so it is
	 * unusable when guest memory is encrypted.
	 */
	if (hb && !mem_encrypt_active()) {
		unsigned long bp = channel->cookie_low;
		u32 channel_id = (channel->channel_id << 16);

		si = channel->cookie_high;
		di = (uintptr_t) reply;

		VMW_PORT_HB_IN(
			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
			reply_len, si, di,
			VMWARE_HYPERVISOR_HB | channel_id,
			VMW_HYPERVISOR_MAGIC, bp,
			eax, ebx, ecx, edx, si, di);

		/* On the HB path the status comes back in ebx. */
		return ebx;
	}

	/* Low-bandwidth fallback: pull the payload 4 bytes per port call. */
	ecx = MESSAGE_STATUS_SUCCESS << 16;
	while (reply_len) {
		unsigned int bytes = min_t(unsigned long, reply_len, 4);

		si = channel->cookie_high;
		di = channel->cookie_low;

		VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
			MESSAGE_STATUS_SUCCESS, si, di,
			channel->channel_id << 16,
			VMW_HYPERVISOR_MAGIC,
			eax, ebx, ecx, edx, si, di);

		/* Bail out on the first unsuccessful chunk. */
		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
			break;

		/* The received word arrives in ebx. */
		memcpy(reply, &ebx, bytes);
		reply_len -= bytes;
		reply += bytes;
	}

	return ecx;
}
260
261
262
263
264
265
266
267
268
269
/**
 * vmw_send_msg - Send a message to the host over an open RPC channel.
 * @channel: The open RPC channel.
 * @msg: NUL-terminated message to send.
 *
 * Announces the message size, then sends the payload, retrying up to
 * RETRIES times when the host reports a checkpoint (CPT) condition.
 *
 * Returns: 0 on success, -EINVAL on failure or when retries are exhausted.
 */
static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
{
	unsigned long eax, ebx, ecx, edx, si, di;
	size_t msg_len = strlen(msg);
	int retries = 0;

	while (retries < RETRIES) {
		retries++;

		/* Set up additional parameters: session cookie in si/di. */
		si = channel->cookie_high;
		di = channel->cookie_low;

		VMW_PORT(VMW_PORT_CMD_SENDSIZE,
			msg_len, si, di,
			channel->channel_id << 16,
			VMW_HYPERVISOR_MAGIC,
			eax, ebx, ecx, edx, si, di);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			/* Expected success from the host; give up. */
			return -EINVAL;
		}

		/* Host sets MESSAGE_STATUS_HB if the HB port is available. */
		ebx = vmw_port_hb_out(channel, msg,
				      !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));

		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
			return 0;
		} else if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
			/* A checkpoint occurred; retry the whole send. */
			continue;
		} else {
			break;
		}
	}

	return -EINVAL;
}
/* VMW_PORT uses inline asm that objtool cannot validate. */
STACK_FRAME_NON_STANDARD(vmw_send_msg);
311
312
313
314
315
316
317
318
319
320
321
/**
 * vmw_recv_msg - Receive a reply from the host over an open RPC channel.
 * @channel: The open RPC channel.
 * @msg: On success, set to a kzalloc'd NUL-terminated reply the caller
 *	 must kfree(); NULL when the host had nothing to send.
 * @msg_len: On success, set to the reply length (excluding the NUL).
 *
 * Queries the pending reply size, receives the payload, and acknowledges
 * receipt, retrying up to RETRIES times on checkpoint (CPT) conditions.
 *
 * Returns: 0 on success (including "nothing to receive"), -ENOMEM on
 * allocation failure, -EINVAL otherwise.
 */
static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
			size_t *msg_len)
{
	unsigned long eax, ebx, ecx, edx, si, di;
	char *reply;
	size_t reply_len;
	int retries = 0;

	*msg_len = 0;
	*msg = NULL;

	while (retries < RETRIES) {
		retries++;

		/* Set up additional parameters: session cookie in si/di. */
		si = channel->cookie_high;
		di = channel->cookie_low;

		VMW_PORT(VMW_PORT_CMD_RECVSIZE,
			0, si, di,
			channel->channel_id << 16,
			VMW_HYPERVISOR_MAGIC,
			eax, ebx, ecx, edx, si, di);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			DRM_ERROR("Failed to get reply size for host message.\n");
			return -EINVAL;
		}

		/* No DORECV means the host has nothing for us: not an error. */
		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_DORECV) == 0)
			return 0;

		/* Reply size is returned in ebx; +1 for the NUL terminator. */
		reply_len = ebx;
		reply = kzalloc(reply_len + 1, GFP_KERNEL);
		if (!reply) {
			DRM_ERROR("Cannot allocate memory for host message reply.\n");
			return -ENOMEM;
		}

		/* Receive the payload, using the HB port if offered. */
		ebx = vmw_port_hb_in(channel, reply, reply_len,
				     !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
			kfree(reply);
			reply = NULL;
			if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
				/* A checkpoint occurred; retry the receive. */
				continue;
			}

			return -EINVAL;
		}

		reply[reply_len] = '\0';

		/* Acknowledge the reply so the host can release it. */
		si = channel->cookie_high;
		di = channel->cookie_low;

		VMW_PORT(VMW_PORT_CMD_RECVSTATUS,
			MESSAGE_STATUS_SUCCESS, si, di,
			channel->channel_id << 16,
			VMW_HYPERVISOR_MAGIC,
			eax, ebx, ecx, edx, si, di);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			kfree(reply);
			reply = NULL;
			if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
				/* A checkpoint occurred; retry the receive. */
				continue;
			}

			return -EINVAL;
		}

		break;
	}

	/* All retries consumed by checkpoints: reply was freed each time. */
	if (!reply)
		return -EINVAL;

	*msg_len = reply_len;
	*msg = reply;

	return 0;
}
/* VMW_PORT uses inline asm that objtool cannot validate. */
STACK_FRAME_NON_STANDARD(vmw_recv_msg);
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428int vmw_host_get_guestinfo(const char *guest_info_param,
429 char *buffer, size_t *length)
430{
431 struct rpc_channel channel;
432 char *msg, *reply = NULL;
433 size_t reply_len = 0;
434
435 if (!vmw_msg_enabled)
436 return -ENODEV;
437
438 if (!guest_info_param || !length)
439 return -EINVAL;
440
441 msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
442 if (!msg) {
443 DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
444 guest_info_param);
445 return -ENOMEM;
446 }
447
448 if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
449 goto out_open;
450
451 if (vmw_send_msg(&channel, msg) ||
452 vmw_recv_msg(&channel, (void *) &reply, &reply_len))
453 goto out_msg;
454
455 vmw_close_channel(&channel);
456 if (buffer && reply && reply_len > 0) {
457
458
459
460 reply_len = max(reply_len - 2, (size_t) 0);
461 reply_len = min(reply_len, *length);
462
463 if (reply_len > 0)
464 memcpy(buffer, reply + 2, reply_len);
465 }
466
467 *length = reply_len;
468
469 kfree(reply);
470 kfree(msg);
471
472 return 0;
473
474out_msg:
475 vmw_close_channel(&channel);
476 kfree(reply);
477out_open:
478 *length = 0;
479 kfree(msg);
480 DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);
481
482 return -EINVAL;
483}
484
485
486
487
488
489
490
491
492
493__printf(1, 2)
494int vmw_host_printf(const char *fmt, ...)
495{
496 va_list ap;
497 struct rpc_channel channel;
498 char *msg;
499 char *log;
500 int ret = 0;
501
502 if (!vmw_msg_enabled)
503 return -ENODEV;
504
505 if (!fmt)
506 return ret;
507
508 va_start(ap, fmt);
509 log = kvasprintf(GFP_KERNEL, fmt, ap);
510 va_end(ap);
511 if (!log) {
512 DRM_ERROR("Cannot allocate memory for the log message.\n");
513 return -ENOMEM;
514 }
515
516 msg = kasprintf(GFP_KERNEL, "log %s", log);
517 if (!msg) {
518 DRM_ERROR("Cannot allocate memory for host log message.\n");
519 kfree(log);
520 return -ENOMEM;
521 }
522
523 if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
524 goto out_open;
525
526 if (vmw_send_msg(&channel, msg))
527 goto out_msg;
528
529 vmw_close_channel(&channel);
530 kfree(msg);
531 kfree(log);
532
533 return 0;
534
535out_msg:
536 vmw_close_channel(&channel);
537out_open:
538 kfree(msg);
539 kfree(log);
540 DRM_ERROR("Failed to send host log message.\n");
541
542 return -EINVAL;
543}
544
545
546
547
548
549
550
551
552
553
554
555
556
557
/**
 * vmw_msg_ioctl - Relay a userspace-supplied RPC message to the host.
 * @dev: DRM device.
 * @data: struct drm_vmw_msg_arg with send/receive user pointers.
 * @file_priv: DRM file (unused).
 *
 * Copies the message from userspace (bounded by MAX_USER_MSG_LENGTH),
 * sends it, and unless send_only is set copies the host's reply back to
 * the user-supplied receive buffer.
 *
 * Returns: 0 on success, -ENOMEM on allocation failure, -EINVAL otherwise.
 */
int vmw_msg_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct drm_vmw_msg_arg *arg =
		(struct drm_vmw_msg_arg *)data;
	struct rpc_channel channel;
	char *msg;
	int length;

	msg = kmalloc(MAX_USER_MSG_LENGTH, GFP_KERNEL);
	if (!msg) {
		DRM_ERROR("Cannot allocate memory for log message.\n");
		return -ENOMEM;
	}

	/* Reject both copy faults and messages that would be truncated. */
	length = strncpy_from_user(msg, (void __user *)((unsigned long)arg->send),
				   MAX_USER_MSG_LENGTH);
	if (length < 0 || length >= MAX_USER_MSG_LENGTH) {
		DRM_ERROR("Userspace message access failure.\n");
		kfree(msg);
		return -EINVAL;
	}


	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) {
		DRM_ERROR("Failed to open channel.\n");
		goto out_open;
	}

	if (vmw_send_msg(&channel, msg)) {
		DRM_ERROR("Failed to send message to host.\n");
		goto out_msg;
	}

	if (!arg->send_only) {
		char *reply = NULL;
		size_t reply_len = 0;

		if (vmw_recv_msg(&channel, (void *) &reply, &reply_len)) {
			DRM_ERROR("Failed to receive message from host.\n");
			goto out_msg;
		}
		if (reply && reply_len > 0) {
			if (copy_to_user((void __user *)((unsigned long)arg->receive),
					 reply, reply_len)) {
				DRM_ERROR("Failed to copy message to userspace.\n");
				kfree(reply);
				goto out_msg;
			}
			arg->receive_len = (__u32)reply_len;
		}
		/* kfree(NULL) is a no-op when there was no reply. */
		kfree(reply);
	}

	vmw_close_channel(&channel);
	kfree(msg);

	return 0;

out_msg:
	vmw_close_channel(&channel);
out_open:
	kfree(msg);

	return -EINVAL;
}
624
625
626
627
628
629
630
631static inline void reset_ppn_array(PPN64 *arr, size_t size)
632{
633 size_t i;
634
635 BUG_ON(!arr || size == 0);
636
637 for (i = 0; i < size; ++i)
638 arr[i] = INVALID_PPN64;
639}
640
641
642
643
644
645
/**
 * hypervisor_ppn_reset_all - Tell the hypervisor to discard every
 * registered mksGuestStat instance descriptor page.
 */
static inline void hypervisor_ppn_reset_all(void)
{
	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;

	VMW_PORT(VMW_PORT_CMD_MKSGS_RESET,
		0, si, di,
		0,
		VMW_HYPERVISOR_MAGIC,
		eax, ebx, ecx, edx, si, di);
}
656
657
658
659
660
661
662
/**
 * hypervisor_ppn_add - Register a mksGuestStat instance descriptor page
 * with the hypervisor.
 * @pfn: Page frame number of the descriptor page.
 */
static inline void hypervisor_ppn_add(PPN64 pfn)
{
	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;

	VMW_PORT(VMW_PORT_CMD_MKSGS_ADD_PPN,
		(unsigned long)pfn, si, di,
		0,
		VMW_HYPERVISOR_MAGIC,
		eax, ebx, ecx, edx, si, di);
}
673
674
675
676
677
678
679
/**
 * hypervisor_ppn_remove - Unregister a mksGuestStat instance descriptor
 * page from the hypervisor.
 * @pfn: Page frame number of the descriptor page.
 */
static inline void hypervisor_ppn_remove(PPN64 pfn)
{
	unsigned long eax, ebx, ecx, edx, si = 0, di = 0;

	VMW_PORT(VMW_PORT_CMD_MKSGS_REMOVE_PPN,
		(unsigned long)pfn, si, di,
		0,
		VMW_HYPERVISOR_MAGIC,
		eax, ebx, ecx, edx, si, di);
}
690
691#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
692
693
694#define MKSSTAT_KERNEL_PAGES_ORDER 2
695
696#define MKSSTAT_KERNEL_DESCRIPTION "vmwgfx"
697
698
/*
 * { name, description } string pairs for the kernel-side mksGuestStat
 * counters; order must match the mksstat_kern_stats_t enumeration.
 */
static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
{
	{ "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" },
};
703
704
705
706
707
708
709
710
711
712
713
714
/**
 * mksstat_init_record - Initialize a plain (non-time) counter record.
 * @stat_idx: Index of the counter to initialize.
 * @pstat: Base of the counter array.
 * @pinfo: Base of the info-entry array.
 * @pstrs: Write cursor into the strings area; the counter's name and
 *	   description are copied here back to back, NUL-terminated.
 *
 * Returns: the advanced strings cursor, one past the copied description.
 */
static inline char *mksstat_init_record(mksstat_kern_stats_t stat_idx,
	MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs)
{
	/* Description starts right after the NUL of the name string. */
	char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1;
	strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]);
	strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]);

	pinfo[stat_idx].name.s = pstrs;
	pinfo[stat_idx].description.s = pstrd;
	pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_NONE;
	pinfo[stat_idx].stat.counter = (MKSGuestStatCounter *)&pstat[stat_idx];

	return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1;
}
729
730
731
732
733
734
735
736
737
738
739
740
/**
 * mksstat_init_record_time - Initialize a time-stat counter record.
 * @stat_idx: Index of the counter to initialize.
 * @pstat: Base of the counter array.
 * @pinfo: Base of the info-entry array.
 * @pstrs: Write cursor into the strings area; the counter's name and
 *	   description are copied here back to back, NUL-terminated.
 *
 * Same as mksstat_init_record() but flags the entry as a time counter.
 *
 * Returns: the advanced strings cursor, one past the copied description.
 */
static inline char *mksstat_init_record_time(mksstat_kern_stats_t stat_idx,
	MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs)
{
	/* Description starts right after the NUL of the name string. */
	char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1;
	strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]);
	strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]);

	pinfo[stat_idx].name.s = pstrs;
	pinfo[stat_idx].description.s = pstrd;
	pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_TIME;
	pinfo[stat_idx].stat.counterTime = &pstat[stat_idx];

	return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1;
}
755
756
757
758
759
760
761
762
763
764
765
766
767
/**
 * mksstat_init_kern_id - Allocate and set up a kernel-side mksGuestStat
 * instance descriptor and register it with the hypervisor.
 * @ppage: On success, receives the descriptor's page (an order
 *	   MKSSTAT_KERNEL_PAGES_ORDER allocation).
 *
 * Returns: 0 on success, -ENOMEM on allocation failure.
 */
static int mksstat_init_kern_id(struct page **ppage)
{
	MKSGuestStatInstanceDescriptor *pdesc;
	MKSGuestStatCounterTime *pstat;
	MKSGuestStatInfoEntry *pinfo;
	char *pstrs, *pstrs_acc;

	/* Single allocation holds descriptor, stats, info and strings. */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, MKSSTAT_KERNEL_PAGES_ORDER);

	if (!page)
		return -ENOMEM;

	/* Carve the sub-areas out of the allocation (layout set by the
	 * vmw_mksstat_get_kern_* helpers). */
	pdesc = page_address(page);
	pstat = vmw_mksstat_get_kern_pstat(pdesc);
	pinfo = vmw_mksstat_get_kern_pinfo(pdesc);
	pstrs = vmw_mksstat_get_kern_pstrs(pdesc);

	/* Add all kernel counters here; each advances the strings cursor. */
	pstrs_acc = pstrs;
	pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_EXECBUF, pstat, pinfo, pstrs_acc);

	/* The strings area must not overflow its page. */
	BUG_ON(pstrs_acc - pstrs > PAGE_SIZE);

	/* Set up the kernel-internal counters descriptor. */
	pdesc->reservedMBZ = 0;
	pdesc->statStartVA = (uintptr_t)pstat;
	pdesc->strsStartVA = (uintptr_t)pstrs;
	pdesc->statLength = sizeof(*pstat) * MKSSTAT_KERN_COUNT;
	pdesc->infoLength = sizeof(*pinfo) * MKSSTAT_KERN_COUNT;
	pdesc->strsLength = pstrs_acc - pstrs;
	snprintf(pdesc->description, ARRAY_SIZE(pdesc->description) - 1, "%s pid=%d",
		MKSSTAT_KERNEL_DESCRIPTION, current->pid);

	/* Each area fits in one page; mark the remaining PPNs invalid. */
	pdesc->statPPNs[0] = page_to_pfn(virt_to_page(pstat));
	reset_ppn_array(pdesc->statPPNs + 1, ARRAY_SIZE(pdesc->statPPNs) - 1);

	pdesc->infoPPNs[0] = page_to_pfn(virt_to_page(pinfo));
	reset_ppn_array(pdesc->infoPPNs + 1, ARRAY_SIZE(pdesc->infoPPNs) - 1);

	pdesc->strsPPNs[0] = page_to_pfn(virt_to_page(pstrs));
	reset_ppn_array(pdesc->strsPPNs + 1, ARRAY_SIZE(pdesc->strsPPNs) - 1);

	*ppage = page;

	hypervisor_ppn_add((PPN64)page_to_pfn(page));

	return 0;
}
819
820
821
822
823
824
825
826
827
828
829
830
831
832
/**
 * vmw_mksstat_get_kern_slot - Find or create the kernel stats slot for
 * a pid.
 * @pid: The pid to look up.
 * @dev_priv: Device private structure holding the slot tables.
 *
 * Open-addressing lookup over mksstat_kern_pids starting at a hash of
 * @pid. A free slot is claimed lock-free by cmpxchg'ing 0 to
 * MKSSTAT_PID_RESERVED, initialized, then published with the real pid.
 *
 * Returns: a non-negative slot index on success, a negative errno on
 * initialization failure, -ENOSPC when the table is full.
 */
int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv)
{
	const size_t base = (u32)hash_32(pid, MKSSTAT_CAPACITY_LOG2);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
		const size_t slot = (i + base) % ARRAY_SIZE(dev_priv->mksstat_kern_pids);

		/* Slot already belongs to this pid? */
		if (pid == (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[slot]))
			return (int)slot;

		/* Try to claim a free slot; RESERVED keeps racers out
		 * while we initialize it. */
		if (!atomic_cmpxchg(&dev_priv->mksstat_kern_pids[slot], 0, MKSSTAT_PID_RESERVED)) {
			const int ret = mksstat_init_kern_id(&dev_priv->mksstat_kern_pages[slot]);

			if (!ret) {
				/* No timer ticking yet for this slot. */
				dev_priv->mksstat_kern_top_timer[slot] = MKSSTAT_KERN_COUNT;

				/* Publish the slot; lookups may now hit it. */
				atomic_set(&dev_priv->mksstat_kern_pids[slot], pid);
				return (int)slot;
			}

			/* Init failed: release the reservation. */
			atomic_set(&dev_priv->mksstat_kern_pids[slot], 0);
			return ret;
		}
	}

	return -ENOSPC;
}
864
865#endif
866
867
868
869
870
871
872
873
874
875
876
/**
 * vmw_mksstat_cleanup_descriptor - Unpin the user pages referenced by a
 * userspace instance descriptor and free the descriptor page itself.
 * @page: The descriptor page (from vmw_mksstat_add_ioctl()).
 *
 * The PPN arrays are terminated by INVALID_PPN64 (set via
 * reset_ppn_array() before partial fill), so iteration stops at the
 * first invalid entry.
 */
static void vmw_mksstat_cleanup_descriptor(struct page *page)
{
	MKSGuestStatInstanceDescriptor *pdesc = page_address(page);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(pdesc->statPPNs) && pdesc->statPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->statPPNs[i]));

	for (i = 0; i < ARRAY_SIZE(pdesc->infoPPNs) && pdesc->infoPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->infoPPNs[i]));

	for (i = 0; i < ARRAY_SIZE(pdesc->strsPPNs) && pdesc->strsPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->strsPPNs[i]));

	__free_page(page);
}
893
894
895
896
897
898
899
900
901
902
903
904
/**
 * vmw_mksstat_remove_all - Deregister all mksGuestStat instances and
 * tear down every user and kernel slot.
 * @dev_priv: Device private structure holding the slot tables.
 *
 * Slots are claimed by cmpxchg'ing the observed pid to
 * MKSSTAT_PID_RESERVED; slots found mid-setup/teardown by another
 * thread (already RESERVED, or the cmpxchg loses) yield -EAGAIN so the
 * caller can retry.
 *
 * Returns: 0 on success, -EAGAIN when any slot was busy.
 */
int vmw_mksstat_remove_all(struct vmw_private *dev_priv)
{
	int ret = 0;
	size_t i;

	/* Discard all PPNs registered with the hypervisor in one go. */
	hypervisor_ppn_reset_all();

	/* Release the pinned user pages of every userspace instance. */
	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++i) {
		const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_user_pids[i]);

		if (!pid0)
			continue;

		if (pid0 != MKSSTAT_PID_RESERVED) {
			/* Claim the slot; only the winner tears it down. */
			const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_user_pids[i], pid0, MKSSTAT_PID_RESERVED);

			if (!pid1)
				continue;

			if (pid1 == pid0) {
				struct page *const page = dev_priv->mksstat_user_pages[i];

				BUG_ON(!page);

				dev_priv->mksstat_user_pages[i] = NULL;
				atomic_set(&dev_priv->mksstat_user_pids[i], 0);

				vmw_mksstat_cleanup_descriptor(page);
				continue;
			}
		}

		/* Slot was busy (RESERVED or changed underneath us). */
		ret = -EAGAIN;
	}

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
	/* Free the kernel-side instance pages the same way. */
	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
		const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[i]);

		if (!pid0)
			continue;

		if (pid0 != MKSSTAT_PID_RESERVED) {
			const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_kern_pids[i], pid0, MKSSTAT_PID_RESERVED);

			if (!pid1)
				continue;

			if (pid1 == pid0) {
				struct page *const page = dev_priv->mksstat_kern_pages[i];

				BUG_ON(!page);

				dev_priv->mksstat_kern_pages[i] = NULL;
				atomic_set(&dev_priv->mksstat_kern_pids[i], 0);

				/* Kernel pages are our own allocation, not
				 * pinned user memory. */
				__free_pages(page, MKSSTAT_KERNEL_PAGES_ORDER);
				continue;
			}
		}

		ret = -EAGAIN;
	}

#endif
	return ret;
}
975
976
977
978
979
980
981
982
983
984
985
986
987
988
/**
 * vmw_mksstat_reset_ioctl - ioctl entry point that removes all
 * mksGuestStat instances (user and kernel).
 * @dev: DRM device.
 * @data: Unused.
 * @file_priv: Unused.
 *
 * Returns: 0 on success, -EAGAIN when a slot was busy.
 */
int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	return vmw_mksstat_remove_all(vmw_priv(dev));
}
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
/**
 * vmw_mksstat_add_ioctl - Register a userspace mksGuestStat instance.
 * @dev: DRM device.
 * @data: struct drm_vmw_mksstat_add_arg with user VAs/lengths of the
 *	  stat, info and strings areas plus a description string.
 * @file_priv: DRM file (unused).
 *
 * Claims a free user slot, builds an instance descriptor page, pins the
 * caller's stat/info/strings pages long-term, records their PFNs in the
 * descriptor and registers the descriptor page with the hypervisor.
 * On success arg->id receives the slot index.
 *
 * Returns: 0 on success; -EINVAL on bad arguments, -ENOSPC when no slot
 * is free, -EFAULT on description copy failure, -ENOMEM on allocation or
 * pinning failure.
 */
int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_mksstat_add_arg *arg =
		(struct drm_vmw_mksstat_add_arg *) data;

	struct vmw_private *const dev_priv = vmw_priv(dev);

	struct page *page;
	MKSGuestStatInstanceDescriptor *pdesc;
	const size_t num_pages_stat = PFN_UP(arg->stat_len);
	const size_t num_pages_info = PFN_UP(arg->info_len);
	const size_t num_pages_strs = PFN_UP(arg->strs_len);
	long desc_len;
	long nr_pinned_stat;
	long nr_pinned_info;
	long nr_pinned_strs;
	struct page *pages_stat[ARRAY_SIZE(pdesc->statPPNs)];
	struct page *pages_info[ARRAY_SIZE(pdesc->infoPPNs)];
	struct page *pages_strs[ARRAY_SIZE(pdesc->strsPPNs)];
	size_t i, slot;

	arg->id = -1;

	/* All three areas must be present and non-empty. */
	if (!arg->stat || !arg->info || !arg->strs)
		return -EINVAL;

	if (!arg->stat_len || !arg->info_len || !arg->strs_len)
		return -EINVAL;

	if (!arg->description)
		return -EINVAL;

	/* Each area must fit within the descriptor's fixed PPN arrays. */
	if (num_pages_stat > ARRAY_SIZE(pdesc->statPPNs) ||
		num_pages_info > ARRAY_SIZE(pdesc->infoPPNs) ||
		num_pages_strs > ARRAY_SIZE(pdesc->strsPPNs))
		return -EINVAL;

	/* Find an available slot in the mksGuestStats user array and reserve it. */
	for (slot = 0; slot < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++slot)
		if (!atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], 0, MKSSTAT_PID_RESERVED))
			break;

	if (slot == ARRAY_SIZE(dev_priv->mksstat_user_pids))
		return -ENOSPC;

	BUG_ON(dev_priv->mksstat_user_pages[slot]);

	/* Allocate the descriptor page; zeroed so unused fields are 0. */
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page) {
		atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
		return -ENOMEM;
	}

	/* Set up the descriptor from the caller's arguments. */
	pdesc = page_address(page);

	pdesc->reservedMBZ = 0;
	pdesc->statStartVA = arg->stat;
	pdesc->strsStartVA = arg->strs;
	pdesc->statLength = arg->stat_len;
	pdesc->infoLength = arg->info_len;
	pdesc->strsLength = arg->strs_len;
	desc_len = strncpy_from_user(pdesc->description, u64_to_user_ptr(arg->description),
		ARRAY_SIZE(pdesc->description) - 1);

	if (desc_len < 0) {
		atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
		return -EFAULT;
	}

	/* INVALID_PPN64 terminators let cleanup stop at partial fills. */
	reset_ppn_array(pdesc->statPPNs, ARRAY_SIZE(pdesc->statPPNs));
	reset_ppn_array(pdesc->infoPPNs, ARRAY_SIZE(pdesc->infoPPNs));
	reset_ppn_array(pdesc->strsPPNs, ARRAY_SIZE(pdesc->strsPPNs));

	/* Pin mksGuestStat user pages and store those in the instance descriptor. */
	nr_pinned_stat = pin_user_pages(arg->stat, num_pages_stat, FOLL_LONGTERM, pages_stat, NULL);
	if (num_pages_stat != nr_pinned_stat)
		goto err_pin_stat;

	for (i = 0; i < num_pages_stat; ++i)
		pdesc->statPPNs[i] = page_to_pfn(pages_stat[i]);

	nr_pinned_info = pin_user_pages(arg->info, num_pages_info, FOLL_LONGTERM, pages_info, NULL);
	if (num_pages_info != nr_pinned_info)
		goto err_pin_info;

	for (i = 0; i < num_pages_info; ++i)
		pdesc->infoPPNs[i] = page_to_pfn(pages_info[i]);

	nr_pinned_strs = pin_user_pages(arg->strs, num_pages_strs, FOLL_LONGTERM, pages_strs, NULL);
	if (num_pages_strs != nr_pinned_strs)
		goto err_pin_strs;

	for (i = 0; i < num_pages_strs; ++i)
		pdesc->strsPPNs[i] = page_to_pfn(pages_strs[i]);

	/* Send the descriptor to the host via a hypervisor call. The mksGuestStat
	   pages will remain in use until the user requests a matching remove stats
	   or a stats reset occurs. */
	hypervisor_ppn_add((PPN64)page_to_pfn(page));

	dev_priv->mksstat_user_pages[slot] = page;
	/* Publish the slot under the caller's process group id. */
	atomic_set(&dev_priv->mksstat_user_pids[slot], task_pgrp_vnr(current));

	arg->id = slot;

	DRM_DEV_INFO(dev->dev, "pid=%d arg.description='%.*s' id=%zu\n", current->pid, (int)desc_len, pdesc->description, slot);

	return 0;

err_pin_strs:
	/* Partial pins (and negative error returns) are handled here. */
	if (nr_pinned_strs > 0)
		unpin_user_pages(pages_strs, nr_pinned_strs);

err_pin_info:
	if (nr_pinned_info > 0)
		unpin_user_pages(pages_info, nr_pinned_info);

err_pin_stat:
	if (nr_pinned_stat > 0)
		unpin_user_pages(pages_stat, nr_pinned_stat);

	atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
	__free_page(page);
	return -ENOMEM;
}
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
/**
 * vmw_mksstat_remove_ioctl - Deregister a userspace mksGuestStat
 * instance previously added with vmw_mksstat_add_ioctl().
 * @dev: DRM device.
 * @data: struct drm_vmw_mksstat_remove_arg carrying the slot id.
 * @file_priv: DRM file (unused).
 *
 * Only the process group that registered the slot may remove it: the
 * cmpxchg succeeds only when the stored pid equals the caller's pgid.
 *
 * Returns: 0 on success or when the slot is already free, -EINVAL for a
 * bad id, -EAGAIN when the slot belongs to someone else or is busy.
 */
int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_mksstat_remove_arg *arg =
		(struct drm_vmw_mksstat_remove_arg *) data;

	struct vmw_private *const dev_priv = vmw_priv(dev);

	const size_t slot = arg->id;
	pid_t pgid, pid;

	if (slot >= ARRAY_SIZE(dev_priv->mksstat_user_pids))
		return -EINVAL;

	DRM_DEV_INFO(dev->dev, "pid=%d arg.id=%zu\n", current->pid, slot);

	/* Claim the slot for teardown only if we are its owner. */
	pgid = task_pgrp_vnr(current);
	pid = atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], pgid, MKSSTAT_PID_RESERVED);

	/* Already free: nothing to do. */
	if (!pid)
		return 0;

	if (pid == pgid) {
		struct page *const page = dev_priv->mksstat_user_pages[slot];

		BUG_ON(!page);

		dev_priv->mksstat_user_pages[slot] = NULL;
		atomic_set(&dev_priv->mksstat_user_pids[slot], 0);

		/* Tell the hypervisor to drop the descriptor, then unpin. */
		hypervisor_ppn_remove((PPN64)page_to_pfn(page));

		vmw_mksstat_cleanup_descriptor(page);
		return 0;
	}

	/* Slot owned by a different pgid or mid-operation elsewhere. */
	return -EAGAIN;
}
1190