1
2
3
4
5
6
7
8
9
10
11
12
#include "../kselftest_harness.h"

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <strings.h>
#include <time.h>
#include <pthread.h>
#include <hugetlbfs.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/wait.h>
29
30
31
32
33
34#include "../../../../lib/test_hmm_uapi.h"
35
/*
 * Describes one range of memory exercised by a test: the CPU mapping and
 * a separate "mirror" buffer that the dmirror device reads from / writes to.
 */
struct hmm_buffer {
	void *ptr;		/* CPU address of the mapped test range */
	void *mirror;		/* device-side copy used by dmirror commands */
	unsigned long size;	/* length of the range in bytes */
	int fd;			/* backing file descriptor, or -1 for anonymous */
	uint64_t cpages;	/* pages processed by the last dmirror command */
	uint64_t faults;	/* device faults taken by the last command */
};
44
#define TWOMEG		(1 << 21)		/* 2 MB: one x86-64 THP */
#define HMM_BUFFER_SIZE (1024 << 12)		/* default test range: 4 MB */
#define HMM_PATH_MAX    64			/* device path buffer size */
#define NTIMES		256			/* iterations for stress loops */

/*
 * Round x up to the next multiple of a (a must be a power of two).
 * Note: every macro argument use is parenthesized; the original
 * "(a - 1)" mis-expanded for expression arguments such as "1 << 3".
 */
#define ALIGN(x, a) (((x) + ((a) - 1)) & (~((a) - 1)))
51
/* Per-test state for single-device tests: one open dmirror device. */
FIXTURE(hmm)
{
	int fd;				/* /dev/hmm_dmirror0 */
	unsigned int page_size;		/* system page size in bytes */
	unsigned int page_shift;	/* log2(page_size) */
};
58
/* Per-test state for two-device tests (e.g. double mapping, migration). */
FIXTURE(hmm2)
{
	int fd0;			/* /dev/hmm_dmirror0 */
	int fd1;			/* /dev/hmm_dmirror1 */
	unsigned int page_size;		/* system page size in bytes */
	unsigned int page_shift;	/* log2(page_size) */
};
66
67static int hmm_open(int unit)
68{
69 char pathname[HMM_PATH_MAX];
70 int fd;
71
72 snprintf(pathname, sizeof(pathname), "/dev/hmm_dmirror%d", unit);
73 fd = open(pathname, O_RDWR, 0);
74 if (fd < 0)
75 fprintf(stderr, "could not open hmm dmirror driver (%s)\n",
76 pathname);
77 return fd;
78}
79
/* Query the page size and open dmirror device 0 before each hmm test. */
FIXTURE_SETUP(hmm)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	/* ffs() gives the 1-based index of the lowest set bit. */
	self->page_shift = ffs(self->page_size) - 1;

	self->fd = hmm_open(0);
	ASSERT_GE(self->fd, 0);
}
88
/* Query the page size and open both dmirror devices before each hmm2 test. */
FIXTURE_SETUP(hmm2)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	/* ffs() gives the 1-based index of the lowest set bit. */
	self->page_shift = ffs(self->page_size) - 1;

	self->fd0 = hmm_open(0);
	ASSERT_GE(self->fd0, 0);
	self->fd1 = hmm_open(1);
	ASSERT_GE(self->fd1, 0);
}
99
/* Close the dmirror device after each hmm test. */
FIXTURE_TEARDOWN(hmm)
{
	int ret = close(self->fd);

	ASSERT_EQ(ret, 0);
	self->fd = -1;
}
107
/* Close both dmirror devices after each hmm2 test. */
FIXTURE_TEARDOWN(hmm2)
{
	int ret = close(self->fd0);

	ASSERT_EQ(ret, 0);
	self->fd0 = -1;

	ret = close(self->fd1);
	ASSERT_EQ(ret, 0);
	self->fd1 = -1;
}
119
120static int hmm_dmirror_cmd(int fd,
121 unsigned long request,
122 struct hmm_buffer *buffer,
123 unsigned long npages)
124{
125 struct hmm_dmirror_cmd cmd;
126 int ret;
127
128
129 cmd.addr = (__u64)buffer->ptr;
130 cmd.ptr = (__u64)buffer->mirror;
131 cmd.npages = npages;
132
133 for (;;) {
134 ret = ioctl(fd, request, &cmd);
135 if (ret == 0)
136 break;
137 if (errno == EINTR)
138 continue;
139 return -errno;
140 }
141 buffer->cpages = cmd.cpages;
142 buffer->faults = cmd.faults;
143
144 return 0;
145}
146
147static void hmm_buffer_free(struct hmm_buffer *buffer)
148{
149 if (buffer == NULL)
150 return;
151
152 if (buffer->ptr)
153 munmap(buffer->ptr, buffer->size);
154 free(buffer->mirror);
155 free(buffer);
156}
157
158
159
160
161static int hmm_create_file(unsigned long size)
162{
163 char path[HMM_PATH_MAX];
164 int fd;
165
166 strcpy(path, "/tmp");
167 fd = open(path, O_TMPFILE | O_EXCL | O_RDWR, 0600);
168 if (fd >= 0) {
169 int r;
170
171 do {
172 r = ftruncate(fd, size);
173 } while (r == -1 && errno == EINTR);
174 if (!r)
175 return fd;
176 close(fd);
177 }
178 return -1;
179}
180
181
182
183
/*
 * Return a random unsigned 32-bit value read from /dev/urandom, or ~0U if
 * the device cannot be opened or read.  The descriptor is opened once and
 * cached for the life of the process.
 */
static unsigned int hmm_random(void)
{
	static int fd = -1;
	unsigned int r;

	if (fd < 0) {
		fd = open("/dev/urandom", O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "%s:%d failed to open /dev/urandom\n",
					__FILE__, __LINE__);
			return ~0U;
		}
	}
	/*
	 * Check the read: the original ignored the result, so a short or
	 * failed read would return an uninitialized value (UB).
	 */
	if (read(fd, &r, sizeof(r)) != sizeof(r)) {
		fprintf(stderr, "%s:%d failed to read /dev/urandom\n",
				__FILE__, __LINE__);
		return ~0U;
	}
	return r;
}
200
/* Sleep for @n nanoseconds (@n must be less than one second). */
static void hmm_nanosleep(unsigned int n)
{
	struct timespec ts = {
		.tv_sec = 0,
		.tv_nsec = n,
	};

	nanosleep(&ts, NULL);
}
209
210
211
212
/*
 * Trivial test: just open and close the device (done by the fixture
 * setup/teardown); the body itself has nothing to check.
 */
TEST_F(hmm, open_close)
{
}
216
217
218
219
/*
 * Read private anonymous memory through the device and check that
 * untouched pages read back as zero while initialized pages read back
 * the CPU's values.
 */
TEST_F(hmm, anon_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int val;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/*
	 * Initialize buffer in system memory but leave the first two pages
	 * untouched (pte_none and, below, the zero page).
	 */
	i = 2 * self->page_size / sizeof(*ptr);
	for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Set buffer permission to read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Populate the CPU page table for page 1 with the zero page. */
	val = *(int *)(buffer->ptr + self->page_size);
	ASSERT_EQ(val, 0);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read: zeros, then the initialized values. */
	ptr = buffer->mirror;
	for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);
	for (; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
279
280
281
282
283
/*
 * Read private anonymous memory which has been protected with PROT_NONE:
 * the device read must fail and the mirror must be left untouched.
 */
TEST_F(hmm, anon_read_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize mirror buffer so we can verify it isn't written. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	/* Protect buffer from reading. */
	ret = mprotect(buffer->ptr, size, PROT_NONE);
	ASSERT_EQ(ret, 0);

	/* Simulate a device reading system memory: must fault. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, -EFAULT);

	/* Allow the CPU to read the buffer again so we can check it. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Check that the mirror was not written. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	hmm_buffer_free(buffer);
}
339
340
341
342
/*
 * Write private anonymous memory through the device and check that the
 * CPU sees the values the device wrote.
 */
TEST_F(hmm, anon_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize the data that the device will write. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
386
387
388
389
390
/*
 * Write private anonymous memory which has been mapped read-only: the
 * device write must fail until the mapping is made writable.
 */
TEST_F(hmm, anon_write_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading a zero page of memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);
	ASSERT_EQ(buffer->faults, 1);

	/* Initialize the data that the device will write. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory: must be refused. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, -EPERM);

	/* Check that the device didn't write anything. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);

	/* Now allow writing and see that the zero page is replaced. */
	ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
452
453
454
455
456
/*
 * Check that a device writing an anonymous private mapping will
 * copy-on-write in the child if a child process inherits the mapping:
 * the parent's copy must stay unchanged.
 */
TEST_F(hmm, anon_write_child)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize the data that the device will write. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);
	if (pid != 0) {
		/* Parent: wait for the child, then verify COW happened. */
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did not change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);
		return;
	}

	/* Child: check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/* The child process needs its own mirror to its own mm. */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote (child's copy only). */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}
530
531
532
533
534
/*
 * Check that a device writing an anonymous shared mapping inherited by a
 * child process is visible to the parent (no copy-on-write).
 */
TEST_F(hmm, anon_write_child_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize the data that the device will write. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);
	if (pid != 0) {
		/* Parent: wait for the child, then verify sharing. */
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], -i);
		return;
	}

	/* Child: check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/* The child process needs its own mirror to its own mm. */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}
608
609
610
611
/*
 * Write to a transparent huge page (THP) of anonymous memory through
 * the device.
 */
TEST_F(hmm, anon_write_huge)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	void *old_ptr;
	void *map;
	int *ptr;
	int ret;

	/* Map twice the huge page size so we can align within the range. */
	size = 2 * TWOMEG;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Use the 2MB-aligned middle of the mapping as the test range. */
	size = TWOMEG;
	npages = size >> self->page_shift;
	map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
	ret = madvise(map, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	old_ptr = buffer->ptr;
	buffer->ptr = map;

	/* Initialize the data that the device will write. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Restore the original pointer so the full range is unmapped. */
	buffer->ptr = old_ptr;
	hmm_buffer_free(buffer);
}
664
665
666
667
/*
 * Write to a hugetlbfs page of memory through the device.  The test is
 * skipped (returns early) if no hugetlbfs page can be allocated.
 */
TEST_F(hmm, anon_write_hugetlbfs)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	long pagesizes[4];
	int n, idx;

	/* Skip test if we can't determine a huge page size. */
	n = gethugepagesizes(pagesizes, 4);
	if (n <= 0)
		return;
	/* Pick the smallest available huge page size. */
	for (idx = 0; --n > 0; ) {
		if (pagesizes[n] < pagesizes[idx])
			idx = n;
	}
	size = ALIGN(TWOMEG, pagesizes[idx]);
	npages = size >> self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	/* Skip test if we can't allocate a hugetlbfs page. */
	buffer->ptr = get_hugepage_region(size, GHR_STRICT);
	if (buffer->ptr == NULL) {
		free(buffer);
		return;
	}

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Initialize the data that the device will write. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* The huge page region is not munmap()-able; free it explicitly. */
	free_hugepage_region(buffer->ptr);
	buffer->ptr = NULL;
	hmm_buffer_free(buffer);
}
723
724
725
726
/*
 * Read mmap'ed file memory through the device and check the contents
 * against what was written to the file.
 */
TEST_F(hmm, file_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Write initial contents to the file, then clear the mirror. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;
	len = pwrite(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	memset(buffer->mirror, 0, size);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
778
779
780
781
/*
 * Write mmap'ed file memory through the device and check that both the
 * mapping and the underlying file see the data.
 */
TEST_F(hmm, file_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize the data that the device will write. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote through the mapping. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Check that the device also wrote the file. */
	len = pread(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
836
837
838
839
/*
 * Migrate anonymous memory to device private memory.
 */
TEST_F(hmm, migrate)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read during migration. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
882
883
884
885
886
/*
 * Migrate anonymous memory to device private memory and then fault it
 * back to system memory by touching it from the CPU.
 */
TEST_F(hmm, migrate_fault)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read during migration. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Fault pages back to system memory and check them. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
933
934
935
936
937TEST_F(hmm2, migrate_mixed)
938{
939 struct hmm_buffer *buffer;
940 unsigned long npages;
941 unsigned long size;
942 int *ptr;
943 unsigned char *p;
944 int ret;
945 int val;
946
947 npages = 6;
948 size = npages << self->page_shift;
949
950 buffer = malloc(sizeof(*buffer));
951 ASSERT_NE(buffer, NULL);
952
953 buffer->fd = -1;
954 buffer->size = size;
955 buffer->mirror = malloc(size);
956 ASSERT_NE(buffer->mirror, NULL);
957
958
959 buffer->ptr = mmap(NULL, size,
960 PROT_NONE,
961 MAP_PRIVATE | MAP_ANONYMOUS,
962 buffer->fd, 0);
963 ASSERT_NE(buffer->ptr, MAP_FAILED);
964 p = buffer->ptr;
965
966
967 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, npages);
968 ASSERT_EQ(ret, -EINVAL);
969
970
971 ret = munmap(buffer->ptr + self->page_size, self->page_size);
972 ASSERT_EQ(ret, 0);
973
974
975 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 3);
976 ASSERT_EQ(ret, -EINVAL);
977
978
979 ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
980 PROT_READ);
981 ASSERT_EQ(ret, 0);
982 ptr = (int *)(buffer->ptr + 2 * self->page_size);
983 val = *ptr + 3;
984 ASSERT_EQ(val, 3);
985
986
987 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
988 PROT_READ | PROT_WRITE);
989 ASSERT_EQ(ret, 0);
990 ptr = (int *)(buffer->ptr + 3 * self->page_size);
991 *ptr = val;
992 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
993 PROT_READ);
994 ASSERT_EQ(ret, 0);
995
996
997 ret = mprotect(buffer->ptr + 4 * self->page_size, 2 * self->page_size,
998 PROT_READ | PROT_WRITE);
999 ASSERT_EQ(ret, 0);
1000 ptr = (int *)(buffer->ptr + 4 * self->page_size);
1001 *ptr = val;
1002 ptr = (int *)(buffer->ptr + 5 * self->page_size);
1003 *ptr = val;
1004
1005
1006 buffer->ptr = p + 2 * self->page_size;
1007 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 4);
1008 ASSERT_EQ(ret, 0);
1009 ASSERT_EQ(buffer->cpages, 4);
1010
1011
1012 buffer->ptr = p + 5 * self->page_size;
1013 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
1014 ASSERT_EQ(ret, -ENOENT);
1015 buffer->ptr = p;
1016
1017 buffer->ptr = p;
1018 hmm_buffer_free(buffer);
1019}
1020
1021
1022
1023
1024
/*
 * Migrate anonymous memory to device private memory multiple times,
 * stressing allocation/migration/teardown of device memory.
 */
TEST_F(hmm, migrate_multiple)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	unsigned long c;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; c++) {
		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i;

		/* Migrate memory to device. */
		ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer,
				      npages);
		ASSERT_EQ(ret, 0);
		ASSERT_EQ(buffer->cpages, npages);

		/* Check what the device read during migration. */
		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);

		/* Fault pages back to system memory and check them. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);

		hmm_buffer_free(buffer);
	}
}
1075
1076
1077
1078
/*
 * Read anonymous memory through the device multiple times, varying the
 * data each pass to make sure stale data isn't returned.
 */
TEST_F(hmm, anon_read_multiple)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	unsigned long c;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; c++) {
		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory (varies with c). */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i + c;

		/* Simulate a device reading system memory. */
		ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
				      npages);
		ASSERT_EQ(ret, 0);
		ASSERT_EQ(buffer->cpages, npages);
		ASSERT_EQ(buffer->faults, 1);

		/* Check what the device read. */
		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i + c);

		hmm_buffer_free(buffer);
	}
}
1126
1127void *unmap_buffer(void *p)
1128{
1129 struct hmm_buffer *buffer = p;
1130
1131
1132 hmm_nanosleep(hmm_random() % 32000);
1133 munmap(buffer->ptr + buffer->size / 2, buffer->size / 2);
1134 buffer->ptr = NULL;
1135
1136 return NULL;
1137}
1138
1139
1140
1141
/*
 * Try reading anonymous memory while a second thread is unmapping it:
 * the device read may fail, but if it succeeds the data must be correct.
 */
TEST_F(hmm, anon_teardown)
{
	unsigned long npages;
	unsigned long size;
	unsigned long c;
	void *ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; ++c) {
		pthread_t thread;
		struct hmm_buffer *buffer;
		unsigned long i;
		int *ptr;
		int rc;

		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory (varies with c). */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i + c;

		/* Start a thread that races to unmap half the buffer. */
		rc = pthread_create(&thread, NULL, unmap_buffer, buffer);
		ASSERT_EQ(rc, 0);

		/* Simulate a device reading system memory. */
		rc = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
				     npages);
		if (rc == 0) {
			ASSERT_EQ(buffer->cpages, npages);
			ASSERT_EQ(buffer->faults, 1);

			/* If the read won the race, the data must match. */
			for (i = 0, ptr = buffer->mirror;
			     i < size / sizeof(*ptr);
			     ++i)
				ASSERT_EQ(ptr[i], i + c);
		}

		pthread_join(thread, &ret);
		hmm_buffer_free(buffer);
	}
}
1199
1200
1201
1202
/*
 * Test memory snapshot without faulting in pages accessed by the device:
 * one byte of protection info per page is written to the mirror.
 */
TEST_F(hmm2, snapshot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int *ptr;
	unsigned char *p;
	unsigned char *m;
	int ret;
	int val;

	npages = 7;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	/* One snapshot byte per page. */
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses with no access at first. */
	buffer->ptr = mmap(NULL, size,
			   PROT_NONE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);
	p = buffer->ptr;

	/* Punch a hole after the first page address. */
	ret = munmap(buffer->ptr + self->page_size, self->page_size);
	ASSERT_EQ(ret, 0);

	/* Page 2 will be a read-only zero page. */
	ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
				PROT_READ);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 2 * self->page_size);
	val = *ptr + 3;
	ASSERT_EQ(val, 3);

	/* Page 3 will be read-only. */
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
				PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 3 * self->page_size);
	*ptr = val;
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
				PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Pages 4-6 will be read-write. */
	ret = mprotect(buffer->ptr + 4 * self->page_size, 3 * self->page_size,
				PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 4 * self->page_size);
	*ptr = val;

	/* Page 5 will be migrated to device 0. */
	buffer->ptr = p + 5 * self->page_size;
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Page 6 will be migrated to device 1. */
	buffer->ptr = p + 6 * self->page_size;
	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Simulate a device snapshotting CPU pagetables. */
	buffer->ptr = p;
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	ASSERT_EQ(m[0], HMM_DMIRROR_PROT_ERROR);	/* PROT_NONE page */
	ASSERT_EQ(m[1], HMM_DMIRROR_PROT_ERROR);	/* unmapped hole */
	ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
	/* Page 5 is local device private memory, page 6 is remote. */
	ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
			HMM_DMIRROR_PROT_WRITE);
	ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);

	hmm_buffer_free(buffer);
}
1293
1294
1295
1296
1297TEST_F(hmm2, double_map)
1298{
1299 struct hmm_buffer *buffer;
1300 unsigned long npages;
1301 unsigned long size;
1302 unsigned long i;
1303 int *ptr;
1304 int ret;
1305
1306 npages = 6;
1307 size = npages << self->page_shift;
1308
1309 buffer = malloc(sizeof(*buffer));
1310 ASSERT_NE(buffer, NULL);
1311
1312 buffer->fd = -1;
1313 buffer->size = size;
1314 buffer->mirror = malloc(npages);
1315 ASSERT_NE(buffer->mirror, NULL);
1316
1317
1318 buffer->ptr = mmap(NULL, size,
1319 PROT_READ | PROT_WRITE,
1320 MAP_PRIVATE | MAP_ANONYMOUS,
1321 buffer->fd, 0);
1322 ASSERT_NE(buffer->ptr, MAP_FAILED);
1323
1324
1325 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1326 ptr[i] = i;
1327
1328
1329 ret = mprotect(buffer->ptr, size, PROT_READ);
1330 ASSERT_EQ(ret, 0);
1331
1332
1333 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
1334 ASSERT_EQ(ret, 0);
1335 ASSERT_EQ(buffer->cpages, npages);
1336 ASSERT_EQ(buffer->faults, 1);
1337
1338
1339 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1340 ASSERT_EQ(ptr[i], i);
1341
1342
1343 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages);
1344 ASSERT_EQ(ret, 0);
1345 ASSERT_EQ(buffer->cpages, npages);
1346 ASSERT_EQ(buffer->faults, 1);
1347
1348
1349 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1350 ASSERT_EQ(ptr[i], i);
1351
1352
1353 ret = munmap(buffer->ptr + self->page_size, self->page_size);
1354 ASSERT_EQ(ret, 0);
1355
1356 hmm_buffer_free(buffer);
1357}
1358
1359TEST_HARNESS_MAIN
1360