1
2
3
4
5
6
7
8
9
10
11
12
13#include "../kselftest_harness.h"
14
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <strings.h>
#include <time.h>
#include <pthread.h>
#include <hugetlbfs.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/ioctl.h>
29
30
31
32
33
34#include "../../../../lib/test_hmm_uapi.h"
35
/*
 * One buffer under test: a CPU mapping plus a same-sized scratch
 * ("mirror") area that hmm_dmirror_cmd() hands to the dmirror driver.
 */
struct hmm_buffer {
	void *ptr;		/* CPU-visible mapping being exercised */
	void *mirror;		/* device-side scratch copy handed to the driver */
	unsigned long size;	/* size in bytes of ptr (and mirror) */
	int fd;			/* backing file descriptor, or -1 for anonymous */
	uint64_t cpages;	/* pages processed by the last driver command */
	uint64_t faults;	/* faults reported by the last driver command */
};
44
/* 2 MiB — the huge-page granularity used by the THP/hugetlbfs tests. */
#define TWOMEG (1 << 21)
/* Default size for the large anonymous/file test buffers (4 MiB). */
#define HMM_BUFFER_SIZE (1024 << 12)
/* Maximum length of a /dev/hmm_dmirror* path. */
#define HMM_PATH_MAX 64
/* Iteration count for the *_multiple and teardown stress loops. */
#define NTIMES 256

/* Round x up to the next multiple of a (a must be a power of two). */
#define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))
51
/* Per-test state for single-device tests. */
FIXTURE(hmm)
{
	int fd;			/* open descriptor for dmirror unit 0 */
	unsigned int page_size;
	unsigned int page_shift;
};
58
/* Per-test state for two-device tests (units 0 and 1). */
FIXTURE(hmm2)
{
	int fd0;		/* open descriptor for dmirror unit 0 */
	int fd1;		/* open descriptor for dmirror unit 1 */
	unsigned int page_size;
	unsigned int page_shift;
};
66
67static int hmm_open(int unit)
68{
69 char pathname[HMM_PATH_MAX];
70 int fd;
71
72 snprintf(pathname, sizeof(pathname), "/dev/hmm_dmirror%d", unit);
73 fd = open(pathname, O_RDWR, 0);
74 if (fd < 0)
75 fprintf(stderr, "could not open hmm dmirror driver (%s)\n",
76 pathname);
77 return fd;
78}
79
/* Per-test setup: record page geometry and open dmirror unit 0. */
FIXTURE_SETUP(hmm)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	/* ffs() of a power-of-two page size gives log2(page_size) + 1. */
	self->page_shift = ffs(self->page_size) - 1;

	self->fd = hmm_open(0);
	ASSERT_GE(self->fd, 0);
}
88
/* Per-test setup: record page geometry and open dmirror units 0 and 1. */
FIXTURE_SETUP(hmm2)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	/* ffs() of a power-of-two page size gives log2(page_size) + 1. */
	self->page_shift = ffs(self->page_size) - 1;

	self->fd0 = hmm_open(0);
	ASSERT_GE(self->fd0, 0);
	self->fd1 = hmm_open(1);
	ASSERT_GE(self->fd1, 0);
}
99
/* Per-test teardown: close the device and poison the descriptor. */
FIXTURE_TEARDOWN(hmm)
{
	int ret = close(self->fd);

	ASSERT_EQ(ret, 0);
	self->fd = -1;
}
107
/* Per-test teardown: close both devices and poison the descriptors. */
FIXTURE_TEARDOWN(hmm2)
{
	int ret = close(self->fd0);

	ASSERT_EQ(ret, 0);
	self->fd0 = -1;

	ret = close(self->fd1);
	ASSERT_EQ(ret, 0);
	self->fd1 = -1;
}
119
120static int hmm_dmirror_cmd(int fd,
121 unsigned long request,
122 struct hmm_buffer *buffer,
123 unsigned long npages)
124{
125 struct hmm_dmirror_cmd cmd;
126 int ret;
127
128
129 cmd.addr = (__u64)buffer->ptr;
130 cmd.ptr = (__u64)buffer->mirror;
131 cmd.npages = npages;
132
133 for (;;) {
134 ret = ioctl(fd, request, &cmd);
135 if (ret == 0)
136 break;
137 if (errno == EINTR)
138 continue;
139 return -errno;
140 }
141 buffer->cpages = cmd.cpages;
142 buffer->faults = cmd.faults;
143
144 return 0;
145}
146
147static void hmm_buffer_free(struct hmm_buffer *buffer)
148{
149 if (buffer == NULL)
150 return;
151
152 if (buffer->ptr)
153 munmap(buffer->ptr, buffer->size);
154 free(buffer->mirror);
155 free(buffer);
156}
157
158
159
160
161static int hmm_create_file(unsigned long size)
162{
163 char path[HMM_PATH_MAX];
164 int fd;
165
166 strcpy(path, "/tmp");
167 fd = open(path, O_TMPFILE | O_EXCL | O_RDWR, 0600);
168 if (fd >= 0) {
169 int r;
170
171 do {
172 r = ftruncate(fd, size);
173 } while (r == -1 && errno == EINTR);
174 if (!r)
175 return fd;
176 close(fd);
177 }
178 return -1;
179}
180
181
182
183
/*
 * Return a pseudo-random unsigned int read from /dev/urandom, or ~0U
 * if the device cannot be opened or read.  The descriptor is opened
 * lazily and cached for the life of the process.
 */
static unsigned int hmm_random(void)
{
	static int fd = -1;
	unsigned int r;

	if (fd < 0) {
		fd = open("/dev/urandom", O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "%s:%d failed to open /dev/urandom\n",
				__FILE__, __LINE__);
			return ~0U;
		}
	}
	/*
	 * A short or failed read would leave r uninitialized; returning
	 * it then would be undefined behavior, so fail explicitly.
	 */
	if (read(fd, &r, sizeof(r)) != sizeof(r)) {
		fprintf(stderr, "%s:%d failed to read /dev/urandom\n",
			__FILE__, __LINE__);
		return ~0U;
	}
	return r;
}
200
/* Sleep for @n nanoseconds (result of interruption is ignored). */
static void hmm_nanosleep(unsigned int n)
{
	struct timespec req = {
		.tv_sec = 0,
		.tv_nsec = n,
	};

	nanosleep(&req, NULL);
}
209
210
211
212
/*
 * Open/close the driver: the fixture setup and teardown do all the
 * work, so an empty body is the whole test.
 */
TEST_F(hmm, open_close)
{
}
216
217
218
219
/*
 * Read private anonymous memory through the device and verify the
 * mirror matches, including zero pages for never-written addresses.
 */
TEST_F(hmm, anon_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int val;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/*
	 * Initialize the buffer but leave the first two pages untouched so
	 * they stay unpopulated / zero-page backed.
	 */
	i = 2 * self->page_size / sizeof(*ptr);
	for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Make the buffer read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Touch page 1 read-only so the CPU maps the shared zero page. */
	val = *(int *)(buffer->ptr + self->page_size);
	ASSERT_EQ(val, 0);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* First two pages must read as zero, the rest as the pattern. */
	ptr = buffer->mirror;
	for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);
	for (; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
279
280
281
282
283
/*
 * A device read of PROT_NONE memory must fail with -EFAULT and must
 * leave both the CPU buffer and the mirror untouched.
 */
TEST_F(hmm, anon_read_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Pre-fill the mirror so we can detect any stray write to it. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	/* Forbid all CPU access to the buffer. */
	ret = mprotect(buffer->ptr, size, PROT_NONE);
	ASSERT_EQ(ret, 0);

	/* The device read must be refused. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, -EFAULT);

	/* Re-allow CPU reads and verify the data was not disturbed. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* The mirror must still hold its sentinel pattern. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	hmm_buffer_free(buffer);
}
339
340
341
342
/*
 * Write private anonymous memory from the device and verify the CPU
 * observes the new values.
 */
TEST_F(hmm, anon_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Stage the data the device will write into buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
386
387
388
389
390
/*
 * A device write to read-only memory must fail with -EPERM and leave
 * the buffer untouched; after mprotect() grants write access, the same
 * write must succeed.
 */
TEST_F(hmm, anon_write_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Note: mapped read-only from the start. */
	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Read one page through the device to populate the page table. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);
	ASSERT_EQ(buffer->faults, 1);

	/* Stage the data the device will attempt to write. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Writing through the device must be refused. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, -EPERM);

	/* The buffer must still read as all zeroes. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);

	/* Now grant write permission. */
	ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ);
	ASSERT_EQ(ret, 0);

	/* The same device write must now succeed. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
452
453
454
455
456
/*
 * Fork with a MAP_PRIVATE buffer: the child writes its copy through
 * the device, and the parent's copy must remain unchanged (COW).
 */
TEST_F(hmm, anon_write_child)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so fork() gets a COW mapping of it. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Stage the data the child's device write will use. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);	/* fail the test if fork() failed */
	if (pid != 0) {
		/* Parent: wait for the child, then verify our private
		 * copy was NOT modified by the child's device write. */
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);
		return;
	}

	/* Child: confirm both buffers survived the fork intact. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/* The child needs its own device instance. */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing the child's (COW) memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* The child must see the new values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}
530
531
532
533
534
/*
 * Fork with a MAP_SHARED buffer: the child writes through the device
 * and the parent must observe the new values (shared, not COW).
 */
TEST_F(hmm, anon_write_child_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Note: MAP_SHARED, unlike anon_write_child. */
	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize the shared buffer. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Stage the data the child's device write will use. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);	/* fail the test if fork() failed */
	if (pid != 0) {
		/* Parent: wait for the child, then verify the shared
		 * buffer DOES contain the child's device write. */
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], -i);
		return;
	}

	/* Child: confirm both buffers survived the fork intact. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/* The child needs its own device instance. */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing the shared memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* The child must see the new values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}
608
609
610
611
/*
 * Write through the device into memory advised to use transparent
 * huge pages (MADV_HUGEPAGE).
 */
TEST_F(hmm, anon_write_huge)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	void *old_ptr;
	void *map;
	int *ptr;
	int ret;

	/* Over-allocate so a 2MB-aligned window can be carved inside. */
	size = 2 * TWOMEG;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Work on one 2MB-aligned TWOMEG window within the mapping. */
	size = TWOMEG;
	npages = size >> self->page_shift;
	map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
	ret = madvise(map, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	old_ptr = buffer->ptr;
	buffer->ptr = map;

	/* Stage the data the device will write. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Restore the original base so the whole mapping is unmapped. */
	buffer->ptr = old_ptr;
	hmm_buffer_free(buffer);
}
664
665
666
667
/*
 * Write through the device into a hugetlbfs-backed region.  Silently
 * skips when no huge page sizes are reported or no pages are reserved.
 */
TEST_F(hmm, anon_write_hugetlbfs)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	long pagesizes[4];
	int n, idx;

	/* Skip the test if no huge page size is available. */
	n = gethugepagesizes(pagesizes, 4);
	if (n <= 0)
		return;
	/* Pick the smallest huge page size reported. */
	for (idx = 0; --n > 0; ) {
		if (pagesizes[n] < pagesizes[idx])
			idx = n;
	}
	size = ALIGN(TWOMEG, pagesizes[idx]);
	npages = size >> self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	/* Skip (not fail) if no huge pages are actually reserved. */
	buffer->ptr = get_hugepage_region(size, GHR_STRICT);
	if (buffer->ptr == NULL) {
		free(buffer);
		return;
	}

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Stage the data the device will write. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* The region was not mmap'ed by us: release it via libhugetlbfs
	 * and clear ptr so hmm_buffer_free() won't munmap it again. */
	free_hugepage_region(buffer->ptr);
	buffer->ptr = NULL;
	hmm_buffer_free(buffer);
}
723
724
725
726
/*
 * Read a file-backed (MAP_SHARED) mapping through the device.
 */
TEST_F(hmm, file_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Write the test pattern into the file, then clear the mirror so
	 * only the device read can repopulate it. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;
	len = pwrite(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	memset(buffer->mirror, 0, size);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
778
779
780
781
/*
 * Write a file-backed (MAP_SHARED) mapping through the device and
 * verify both the CPU view and the underlying file contents.
 */
TEST_F(hmm, file_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Stage the data the device will write. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* The CPU view of the shared mapping must show the new values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* The write must also have reached the file itself. */
	len = pread(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
836
837
838
839
/*
 * Migrate private anonymous memory to device private memory and check
 * the data that arrived on the device side.
 */
TEST_F(hmm, migrate)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to the device. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Verify the values copied into the mirror during migration. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
882
883
884
885
886
887
/*
 * Migrate anonymous memory to device private memory, fault some of it
 * back to system memory with CPU reads, then migrate it again.
 */
TEST_F(hmm, migrate_fault)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to the device. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Verify the values copied into the mirror during migration. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* CPU-read half the buffer, faulting it back to system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Migrate the (now mixed-residence) memory to the device again. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Verify the mirror contents once more. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
943
944
945
946
/*
 * Migrating anonymous shared (MAP_SHARED) memory to device private
 * memory is not supported and must fail with -ENOENT.
 */
TEST_F(hmm, migrate_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Migrating shared anonymous memory must be rejected. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
	ASSERT_EQ(ret, -ENOENT);

	hmm_buffer_free(buffer);
}
978
979
980
981
982TEST_F(hmm2, migrate_mixed)
983{
984 struct hmm_buffer *buffer;
985 unsigned long npages;
986 unsigned long size;
987 int *ptr;
988 unsigned char *p;
989 int ret;
990 int val;
991
992 npages = 6;
993 size = npages << self->page_shift;
994
995 buffer = malloc(sizeof(*buffer));
996 ASSERT_NE(buffer, NULL);
997
998 buffer->fd = -1;
999 buffer->size = size;
1000 buffer->mirror = malloc(size);
1001 ASSERT_NE(buffer->mirror, NULL);
1002
1003
1004 buffer->ptr = mmap(NULL, size,
1005 PROT_NONE,
1006 MAP_PRIVATE | MAP_ANONYMOUS,
1007 buffer->fd, 0);
1008 ASSERT_NE(buffer->ptr, MAP_FAILED);
1009 p = buffer->ptr;
1010
1011
1012 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, npages);
1013 ASSERT_EQ(ret, -EINVAL);
1014
1015
1016 ret = munmap(buffer->ptr + self->page_size, self->page_size);
1017 ASSERT_EQ(ret, 0);
1018
1019
1020 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 3);
1021 ASSERT_EQ(ret, -EINVAL);
1022
1023
1024 ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
1025 PROT_READ);
1026 ASSERT_EQ(ret, 0);
1027 ptr = (int *)(buffer->ptr + 2 * self->page_size);
1028 val = *ptr + 3;
1029 ASSERT_EQ(val, 3);
1030
1031
1032 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1033 PROT_READ | PROT_WRITE);
1034 ASSERT_EQ(ret, 0);
1035 ptr = (int *)(buffer->ptr + 3 * self->page_size);
1036 *ptr = val;
1037 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1038 PROT_READ);
1039 ASSERT_EQ(ret, 0);
1040
1041
1042 ret = mprotect(buffer->ptr + 4 * self->page_size, 2 * self->page_size,
1043 PROT_READ | PROT_WRITE);
1044 ASSERT_EQ(ret, 0);
1045 ptr = (int *)(buffer->ptr + 4 * self->page_size);
1046 *ptr = val;
1047 ptr = (int *)(buffer->ptr + 5 * self->page_size);
1048 *ptr = val;
1049
1050
1051 buffer->ptr = p + 2 * self->page_size;
1052 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 4);
1053 ASSERT_EQ(ret, 0);
1054 ASSERT_EQ(buffer->cpages, 4);
1055
1056
1057 buffer->ptr = p + 5 * self->page_size;
1058 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
1059 ASSERT_EQ(ret, -ENOENT);
1060 buffer->ptr = p;
1061
1062 buffer->ptr = p;
1063 hmm_buffer_free(buffer);
1064}
1065
1066
1067
1068
1069
/*
 * Repeatedly migrate anonymous memory to device private memory and
 * fault it back (NTIMES iterations) to stress the migrate paths.
 */
TEST_F(hmm, migrate_multiple)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	unsigned long c;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; c++) {
		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i;

		/* Migrate memory to the device. */
		ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer,
				      npages);
		ASSERT_EQ(ret, 0);
		ASSERT_EQ(buffer->cpages, npages);

		/* Verify the values copied into the mirror. */
		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);

		/* CPU reads fault everything back to system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);

		hmm_buffer_free(buffer);
	}
}
1120
1121
1122
1123
/*
 * Repeatedly read anonymous memory through the device (NTIMES
 * iterations), varying the data pattern each round.
 */
TEST_F(hmm, anon_read_multiple)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	unsigned long c;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; c++) {
		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer; pattern depends on the iteration. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i + c;

		/* Simulate a device reading system memory. */
		ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
				      npages);
		ASSERT_EQ(ret, 0);
		ASSERT_EQ(buffer->cpages, npages);
		ASSERT_EQ(buffer->faults, 1);

		/* Check what the device read. */
		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i + c);

		hmm_buffer_free(buffer);
	}
}
1171
1172void *unmap_buffer(void *p)
1173{
1174 struct hmm_buffer *buffer = p;
1175
1176
1177 hmm_nanosleep(hmm_random() % 32000);
1178 munmap(buffer->ptr + buffer->size / 2, buffer->size / 2);
1179 buffer->ptr = NULL;
1180
1181 return NULL;
1182}
1183
1184
1185
1186
/*
 * Race a device read against a concurrent munmap() of half the buffer
 * (NTIMES iterations).  The read may legitimately fail; when it
 * succeeds, the data must be intact.
 */
TEST_F(hmm, anon_teardown)
{
	unsigned long npages;
	unsigned long size;
	unsigned long c;
	void *ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; ++c) {
		pthread_t thread;
		struct hmm_buffer *buffer;
		unsigned long i;
		int *ptr;
		int rc;

		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer; pattern depends on the iteration. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i + c;

		/* Start the thread that will unmap half the buffer. */
		rc = pthread_create(&thread, NULL, unmap_buffer, buffer);
		ASSERT_EQ(rc, 0);

		/* Simulate a device reading system memory. */
		rc = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
				     npages);
		if (rc == 0) {
			/* Read won the race: contents must be correct. */
			ASSERT_EQ(buffer->cpages, npages);
			ASSERT_EQ(buffer->faults, 1);

			for (i = 0, ptr = buffer->mirror;
			     i < size / sizeof(*ptr);
			     ++i)
				ASSERT_EQ(ptr[i], i + c);
		}

		pthread_join(thread, &ret);
		hmm_buffer_free(buffer);
	}
}
1244
1245
1246
1247
/*
 * HMM_DMIRROR_SNAPSHOT: set up 7 pages in distinct states and check
 * the one-status-byte-per-page report the driver writes to the mirror.
 */
TEST_F(hmm2, snapshot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int *ptr;
	unsigned char *p;
	unsigned char *m;
	int ret;
	int val;

	npages = 7;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	/* Snapshot output is one byte per page. */
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses with no access permission. */
	buffer->ptr = mmap(NULL, size,
			   PROT_NONE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);
	p = buffer->ptr;

	/* Punch a hole after the first page address. */
	ret = munmap(buffer->ptr + self->page_size, self->page_size);
	ASSERT_EQ(ret, 0);

	/* Page 2: read-only zero page (read it without writing). */
	ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
		       PROT_READ);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 2 * self->page_size);
	val = *ptr + 3;
	ASSERT_EQ(val, 3);

	/* Page 3: written once, then made read-only. */
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
		       PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 3 * self->page_size);
	*ptr = val;
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
		       PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Pages 4-6: read-write; only page 4 is written by the CPU. */
	ret = mprotect(buffer->ptr + 4 * self->page_size, 3 * self->page_size,
		       PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 4 * self->page_size);
	*ptr = val;

	/* Page 5: migrated to device 0 (the snapshotting device). */
	buffer->ptr = p + 5 * self->page_size;
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Page 6: migrated to device 1 (a foreign device). */
	buffer->ptr = p + 6 * self->page_size;
	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Simulate a device snapshotting CPU page table state. */
	buffer->ptr = p;
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw, page by page. */
	m = buffer->mirror;
	ASSERT_EQ(m[0], HMM_DMIRROR_PROT_ERROR);	/* PROT_NONE */
	ASSERT_EQ(m[1], HMM_DMIRROR_PROT_ERROR);	/* unmapped hole */
	ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
	ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
			HMM_DMIRROR_PROT_WRITE);
	ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);		/* on device 1 */

	hmm_buffer_free(buffer);
}
1338
1339
1340
1341
1342
1343TEST_F(hmm, compound)
1344{
1345 struct hmm_buffer *buffer;
1346 unsigned long npages;
1347 unsigned long size;
1348 int *ptr;
1349 unsigned char *m;
1350 int ret;
1351 long pagesizes[4];
1352 int n, idx;
1353 unsigned long i;
1354
1355
1356
1357 n = gethugepagesizes(pagesizes, 4);
1358 if (n <= 0)
1359 return;
1360 for (idx = 0; --n > 0; ) {
1361 if (pagesizes[n] < pagesizes[idx])
1362 idx = n;
1363 }
1364 size = ALIGN(TWOMEG, pagesizes[idx]);
1365 npages = size >> self->page_shift;
1366
1367 buffer = malloc(sizeof(*buffer));
1368 ASSERT_NE(buffer, NULL);
1369
1370 buffer->ptr = get_hugepage_region(size, GHR_STRICT);
1371 if (buffer->ptr == NULL) {
1372 free(buffer);
1373 return;
1374 }
1375
1376 buffer->size = size;
1377 buffer->mirror = malloc(npages);
1378 ASSERT_NE(buffer->mirror, NULL);
1379
1380
1381 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1382 ptr[i] = i;
1383
1384
1385 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1386 ASSERT_EQ(ret, 0);
1387 ASSERT_EQ(buffer->cpages, npages);
1388
1389
1390 m = buffer->mirror;
1391 for (i = 0; i < npages; ++i)
1392 ASSERT_EQ(m[i], HMM_DMIRROR_PROT_WRITE |
1393 HMM_DMIRROR_PROT_PMD);
1394
1395
1396 ret = mprotect(buffer->ptr, size, PROT_READ);
1397 ASSERT_EQ(ret, 0);
1398
1399
1400 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1401 ASSERT_EQ(ret, 0);
1402 ASSERT_EQ(buffer->cpages, npages);
1403
1404
1405 m = buffer->mirror;
1406 for (i = 0; i < npages; ++i)
1407 ASSERT_EQ(m[i], HMM_DMIRROR_PROT_READ |
1408 HMM_DMIRROR_PROT_PMD);
1409
1410 free_hugepage_region(buffer->ptr);
1411 buffer->ptr = NULL;
1412 hmm_buffer_free(buffer);
1413}
1414
1415
1416
1417
1418TEST_F(hmm2, double_map)
1419{
1420 struct hmm_buffer *buffer;
1421 unsigned long npages;
1422 unsigned long size;
1423 unsigned long i;
1424 int *ptr;
1425 int ret;
1426
1427 npages = 6;
1428 size = npages << self->page_shift;
1429
1430 buffer = malloc(sizeof(*buffer));
1431 ASSERT_NE(buffer, NULL);
1432
1433 buffer->fd = -1;
1434 buffer->size = size;
1435 buffer->mirror = malloc(npages);
1436 ASSERT_NE(buffer->mirror, NULL);
1437
1438
1439 buffer->ptr = mmap(NULL, size,
1440 PROT_READ | PROT_WRITE,
1441 MAP_PRIVATE | MAP_ANONYMOUS,
1442 buffer->fd, 0);
1443 ASSERT_NE(buffer->ptr, MAP_FAILED);
1444
1445
1446 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1447 ptr[i] = i;
1448
1449
1450 ret = mprotect(buffer->ptr, size, PROT_READ);
1451 ASSERT_EQ(ret, 0);
1452
1453
1454 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
1455 ASSERT_EQ(ret, 0);
1456 ASSERT_EQ(buffer->cpages, npages);
1457 ASSERT_EQ(buffer->faults, 1);
1458
1459
1460 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1461 ASSERT_EQ(ptr[i], i);
1462
1463
1464 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages);
1465 ASSERT_EQ(ret, 0);
1466 ASSERT_EQ(buffer->cpages, npages);
1467 ASSERT_EQ(buffer->faults, 1);
1468
1469
1470 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1471 ASSERT_EQ(ptr[i], i);
1472
1473
1474 ret = munmap(buffer->ptr + self->page_size, self->page_size);
1475 ASSERT_EQ(ret, 0);
1476
1477 hmm_buffer_free(buffer);
1478}
1479
/* Expands to main(), running all tests registered above. */
TEST_HARNESS_MAIN
1481