/*
 * Memory region management for Tiny Code Generator for QEMU
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "tcg-internal.h"


struct tcg_region_tree {
    QemuMutex lock;
    GTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start_aligned;
    void *after_prologue;
    size_t n;
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */
    size_t total_size; /* size of entire buffer, >= n * stride */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};
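
/*
 * The buffer is divided as follows (see tcg_region_bounds()):
 *
 *   start_aligned
 *   v
 *   +--------------+-------+--------------+-------+-- ... --+-------+
 *   |   region 0   | guard |   region 1   | guard |         | guard |
 *   +--------------+-------+--------------+-------+-- ... --+-------+
 *   |<------- stride ----->|
 *   |<--- size --->|
 *
 * Each region is .size bytes of usable space followed by one guard page;
 * region 0 actually begins at .after_prologue once the prologue has been
 * generated, and the last region absorbs any pages left over from rounding.
 */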

static struct tcg_region_state region;

/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i]; each
 * struct is found every tree_size bytes.
 */
static void *region_trees;
static size_t tree_size;

bool in_code_gen_buffer(const void *p)
{
    /*
     * Much like it is valid to have a pointer to the byte past the
     * end of an array (so long as you don't dereference it), allow
     * a pointer to the byte past the end of the code gen buffer.
     */
    return (size_t)(p - region.start_aligned) <= region.total_size;
}

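/*
 * Debug-build variants of the split-wx pointer conversions: same arithmetic
 * as the inline versions used by non-debug builds, but with the extra
 * assertion that the pointer really lies within the code_gen_buffer.
 */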
#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw)
{
    /* Pass NULL pointers unchanged. */
    if (rw) {
        g_assert(in_code_gen_buffer(rw));
        rw += tcg_splitwx_diff;
    }
    return rw;
}

void *tcg_splitwx_to_rw(const void *rx)
{
    /* Pass NULL pointers unchanged. */
    if (rx) {
        rx -= tcg_splitwx_diff;
        /* Assert that we end with a pointer in the rw region. */
        g_assert(in_code_gen_buffer(rx));
    }
    return (void *)rx;
}
#endif

/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp, gpointer userdata)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * All lookups have either .size field set to 0.
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}

static void tb_destroy(gpointer value)
{
    TranslationBlock *tb = value;
    qemu_spin_destroy(&tb->jmp_lock);
}

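/*
 * Each region keeps its own GTree of TranslationBlocks, keyed by the
 * tb_tc descriptor (host code pointer and size), plus a lock.  Splitting
 * the tree per region keeps lock contention low: a thread normally only
 * touches the tree of the region it is generating code into.
 */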
static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = g_tree_new_full(tb_tc_cmp, NULL, NULL, tb_destroy);
    }
}

static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
{
    size_t region_idx;

    /*
     * Like tcg_splitwx_to_rw, with no assert.  The pc may come from
     * a signal handler over which the caller has no control.
     */
    if (!in_code_gen_buffer(p)) {
        p -= tcg_splitwx_diff;
        if (!in_code_gen_buffer(p)) {
            return NULL;
        }
    }

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}

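/*
 * TB insertion and removal.  tb->tc.ptr selects the per-region tree;
 * both operations only take that one region's lock.
 */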
void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}

void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}

/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    if (rt == NULL) {
        return NULL;
    }

    qemu_mutex_lock(&rt->lock);
    tb = g_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}

static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}

void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}

size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += g_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}

static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        /* Increment the refcount first so that destroy acts as a reset */
        g_tree_ref(rt->tree);
        g_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}

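/*
 * Compute the [start, end) bounds of region @curr_region.  Region 0 starts
 * at after_prologue rather than start_aligned, and the last region also
 * covers any pages left over from rounding region_size down to a multiple
 * of the page size.
 */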
static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.after_prologue;
    }
    /* The final region may have a few extra pages due to earlier rounding. */
    if (curr_region == region.n - 1) {
        end = region.start_aligned + region.total_size;
    }

    *pstart = start;
    *pend = end;
}

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

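/*
 * Hand the next unused region to context @s.  Returns true if every region
 * has already been handed out.  Called with region.lock held.
 */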
static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}

/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static void tcg_region_initial_alloc__locked(TCGContext *s)
{
    bool err = tcg_region_alloc__locked(s);
    g_assert(!err);
}

void tcg_region_initial_alloc(TCGContext *s)
{
    qemu_mutex_lock(&region.lock);
    tcg_region_initial_alloc__locked(s);
    qemu_mutex_unlock(&region.lock);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        tcg_region_initial_alloc__locked(s);
    }
    qemu_mutex_unlock(&region.lock);

    tcg_region_tree_reset_all();
}

static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus)
{
#ifdef CONFIG_USER_ONLY
    return 1;
#else
    size_t n_regions;

    /*
     * It is likely that some vCPUs will translate more code than others,
     * so we first try to set more regions than max_cpus, with those regions
     * being of reasonable size. If that's not possible we make do by evenly
     * dividing the code_gen_buffer among the vCPUs.
     */
    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /*
     * Try to have more regions than max_cpus, with each region being >= 2 MB.
     * If we can't, then just allocate one region per vCPU thread.
     */
    n_regions = tb_size / (2 * MiB);
    if (n_regions <= max_cpus) {
        return max_cpus;
    }
    return MIN(n_regions, max_cpus * 8);
#endif
}

/*
 * Minimum size of the code gen buffer.  This number is randomly chosen,
 * but not so small that we can't have a fair number of TB's live.
 *
 * Maximum size, MAX_CODE_GEN_BUFFER_SIZE, is defined in tcg-target.h.
 * Unless otherwise indicated, this is constrained by the range of
 * direct branches on the host cpu, as used by the TCG implementation
 * of goto_tb.
 */
#define MIN_CODE_GEN_BUFFER_SIZE     (1 * MiB)

#if TCG_TARGET_REG_BITS == 32
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
#ifdef CONFIG_USER_ONLY
/*
 * For user mode on smaller 32 bit systems we may run into trouble
 * allocating big chunks of data in the right place. On these systems
 * we utilise a static code generation buffer directly in the binary.
 */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#else /* TCG_TARGET_REG_BITS == 64 */
#ifdef CONFIG_USER_ONLY
/*
 * User-mode emulation typically means running multiple instances of the
 * translator, and therefore multiple instances of translated code, so it
 * is reasonable to limit the default size to 128 MiB.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
#else
/*
 * We expect most system emulation to run one or two guests per host.
 * Users running large scale system emulation may want to tweak their
 * runtime setup via the tb-size control on the command line.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
#endif
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

#ifdef __mips__
/*
 * In order to use J and JAL within the code_gen_buffer, we require
 * that the buffer not cross a 256MB boundary.
 */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/*
 * We weren't able to allocate a buffer without crossing that boundary,
 * so make do with the larger of the two pieces that do not cross it.
 * Returns the new base and size of the buffer in *obuf and *osize.
 */
static inline void split_cross_256mb(void **obuf, size_t *osize,
                                     void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    *obuf = buf1;
    *osize = size1;
}
#endif

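/*
 * Exactly one of the alloc_code_gen_buffer() implementations below is
 * compiled in: the static in-binary buffer, the Win32 VirtualAlloc path,
 * or the generic mmap-based path.  On success each returns the page
 * protections the buffer was mapped with, so that tcg_region_init() knows
 * whether it still has to mprotect the regions; on failure it returns -1.
 */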
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
{
    void *buf, *end;
    size_t size;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    /* page-align the beginning and end of the buffer */
    buf = static_code_gen_buffer;
    end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tb_size) {
        size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        split_cross_256mb(&buf, &size, buf, size);
    }
#endif

    region.start_aligned = buf;
    region.total_size = size;

    return PROT_READ | PROT_WRITE;
}
#elif defined(_WIN32)
static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    void *buf;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                       PAGE_EXECUTE_READWRITE);
    if (buf == NULL) {
        error_setg_win32(errp, GetLastError(),
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

    region.start_aligned = buf;
    region.total_size = size;

    return PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}
#else
static int alloc_code_gen_buffer_anon(size_t size, int prot,
                                      int flags, Error **errp)
{
    void *buf;

    buf = mmap(NULL, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /*
         * Try again, with the original still mapped, to avoid
         * re-acquiring the same 256mb crossing.
         */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            split_cross_256mb(&buf2, &size2, buf, size);
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    region.start_aligned = buf;
    region.total_size = size;
    return prot;
}

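/*
 * Split-wx support: the same memory is mapped twice, read-write for code
 * generation and read-execute for running the generated code, with
 * tcg_splitwx_diff holding the constant offset from the RW to the RX view.
 * The memfd variant backs both mappings with an anonymous memfd; the
 * Darwin variant duplicates the anonymous RW mapping with mach_vm_remap().
 */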
#ifndef CONFIG_TCG_INTERPRETER
#ifdef CONFIG_POSIX
#include "qemu/memfd.h"

static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
{
    void *buf_rw = NULL, *buf_rx = MAP_FAILED;
    int fd = -1;

#ifdef __mips__
    /* Find space for the RX mapping, vs the 256MiB regions. */
    if (alloc_code_gen_buffer_anon(size, PROT_NONE,
                                   MAP_PRIVATE | MAP_ANONYMOUS |
                                   MAP_NORESERVE, errp) < 0) {
        return -1;
    }
    /* The size of the mapping may have been adjusted. */
    buf_rx = region.start_aligned;
    size = region.total_size;
#endif

    buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
    if (buf_rw == NULL) {
        goto fail;
    }

#ifdef __mips__
    void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
                     MAP_SHARED | MAP_FIXED, fd, 0);
    if (tmp != buf_rx) {
        goto fail_rx;
    }
#else
    buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
    if (buf_rx == MAP_FAILED) {
        goto fail_rx;
    }
#endif

    close(fd);
    region.start_aligned = buf_rw;
    region.total_size = size;
    tcg_splitwx_diff = buf_rx - buf_rw;

    return PROT_READ | PROT_WRITE;

 fail_rx:
    error_setg_errno(errp, errno, "failed to map shared memory for execute");
 fail:
    if (buf_rx != MAP_FAILED) {
        munmap(buf_rx, size);
    }
    if (buf_rw) {
        munmap(buf_rw, size);
    }
    if (fd >= 0) {
        close(fd);
    }
    return -1;
}
#endif /* CONFIG_POSIX */

#ifdef CONFIG_DARWIN
#include <mach/mach.h>

extern kern_return_t mach_vm_remap(vm_map_t target_task,
                                   mach_vm_address_t *target_address,
                                   mach_vm_size_t size,
                                   mach_vm_offset_t mask,
                                   int flags,
                                   vm_map_t src_task,
                                   mach_vm_address_t src_address,
                                   boolean_t copy,
                                   vm_prot_t *cur_protection,
                                   vm_prot_t *max_protection,
                                   vm_inherit_t inheritance);

static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
{
    kern_return_t ret;
    mach_vm_address_t buf_rw, buf_rx;
    vm_prot_t cur_prot, max_prot;

    /* Map the read-write portion via normal anon memory. */
    if (alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, errp) < 0) {
        return -1;
    }

    buf_rw = (mach_vm_address_t)region.start_aligned;
    buf_rx = 0;
    ret = mach_vm_remap(mach_task_self(),
                        &buf_rx,
                        size,
                        0,
                        VM_FLAGS_ANYWHERE,
                        mach_task_self(),
                        buf_rw,
                        false,
                        &cur_prot,
                        &max_prot,
                        VM_INHERIT_NONE);
    if (ret != KERN_SUCCESS) {
        /* TODO: Convert "ret" to a human readable error message. */
        error_setg(errp, "vm_remap for jit splitwx failed");
        munmap((void *)buf_rw, size);
        return -1;
    }

    if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
        error_setg_errno(errp, errno, "mprotect for jit splitwx");
        munmap((void *)buf_rx, size);
        munmap((void *)buf_rw, size);
        return -1;
    }

    tcg_splitwx_diff = buf_rx - buf_rw;
    return PROT_READ | PROT_WRITE;
}
#endif /* CONFIG_DARWIN */
#endif /* CONFIG_TCG_INTERPRETER */

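/*
 * Pick whichever split-wx implementation is available on this host;
 * fall through to an error if none was compiled in (e.g. with the
 * TCG interpreter).
 */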
static int alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
{
#ifndef CONFIG_TCG_INTERPRETER
# ifdef CONFIG_DARWIN
    return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
# endif
# ifdef CONFIG_POSIX
    return alloc_code_gen_buffer_splitwx_memfd(size, errp);
# endif
#endif
    error_setg(errp, "jit split-wx not supported");
    return -1;
}

static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    ERRP_GUARD();
    int prot, flags;

    if (splitwx) {
        prot = alloc_code_gen_buffer_splitwx(size, errp);
        if (prot >= 0) {
            return prot;
        }
        /*
         * If splitwx force-on (1), fail;
         * if splitwx default-on (-1), fall through to splitwx off.
         */
        if (splitwx > 0) {
            return -1;
        }
        error_free_or_abort(errp);
    }

    /*
     * Begin with PROT_NONE and let tcg_region_init() upgrade each region
     * to the protections it needs; this leaves the guard pages
     * inaccessible without extra mprotect calls, and avoids a macOS bug
     * where downgrading pages from RWX to NONE fails.
     */
    prot = PROT_NONE;
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef CONFIG_DARWIN
    /* Applicable to both iOS and macOS (Apple Silicon). */
    if (!splitwx) {
        flags |= MAP_JIT;
    }
#endif

    return alloc_code_gen_buffer_anon(size, prot, flags, errp);
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works as follows:
 * - Before the prologue is generated, the buffer is not partitioned:
 *   the first TCG context uses the entire buffer.
 * - After the prologue is generated (see tcg_region_prologue_set), the
 *   buffer is split into region.n equally-sized regions; each translating
 *   thread owns one region at a time and requests another via
 *   tcg_region_alloc() once its current region fills up.
 *
 * Partitioning this way means that threads only synchronize (on
 * region.lock) when a region is exhausted, rather than on every
 * translation, while still sharing a single contiguous buffer that can
 * be sized, flushed and accounted for as a whole.
 */
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    const size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    int have_prot, need_prot;

    /* Size the buffer.  */
    if (tb_size == 0) {
        size_t phys_mem = qemu_get_host_physmem();
        if (phys_mem == 0) {
            tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        } else {
            tb_size = QEMU_ALIGN_DOWN(phys_mem / 8, page_size);
            tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, tb_size);
        }
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }

    have_prot = alloc_code_gen_buffer(tb_size, splitwx, &error_fatal);
    assert(have_prot >= 0);

    /* Request large pages for the buffer and the splitwx.  */
    qemu_madvise(region.start_aligned, region.total_size, QEMU_MADV_HUGEPAGE);
    if (tcg_splitwx_diff) {
        qemu_madvise(region.start_aligned + tcg_splitwx_diff,
                     region.total_size, QEMU_MADV_HUGEPAGE);
    }

    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end
     * of the buffer; we will assign those to the last region.
     */
    region.n = tcg_n_regions(tb_size, max_cpus);
    region_size = tb_size / region.n;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);
    region.stride = region_size;

    /* Reserve space for guard pages. */
    region.size = region_size - page_size;
    region.total_size -= page_size;

    /*
     * The first region will be smaller than the others, via the prologue,
     * which has yet to be allocated.  For now, the first region begins at
     * the page boundary.
     */
    region.after_prologue = region.start_aligned;

    /* init the region struct */
    qemu_mutex_init(&region.lock);

    /*
     * Set guard pages in the rw buffer, as that's the one into which
     * buffer overruns could occur.  Do not set guard pages in the rx
     * buffer -- let that one use hugepages throughout.
     * Work with the page protections set up with the initial mapping.
     */
    need_prot = PAGE_READ | PAGE_WRITE;
#ifndef CONFIG_TCG_INTERPRETER
    if (tcg_splitwx_diff == 0) {
        need_prot |= PAGE_EXEC;
    }
#endif
    for (size_t i = 0, n = region.n; i < n; i++) {
        void *start, *end;

        tcg_region_bounds(i, &start, &end);
        if (have_prot != need_prot) {
            int rc;

            if (need_prot == (PAGE_READ | PAGE_WRITE | PAGE_EXEC)) {
                rc = qemu_mprotect_rwx(start, end - start);
            } else if (need_prot == (PAGE_READ | PAGE_WRITE)) {
                rc = qemu_mprotect_rw(start, end - start);
            } else {
                g_assert_not_reached();
            }
            if (rc) {
                error_setg_errno(&error_fatal, errno,
                                 "mprotect of jit buffer");
            }
        }
        if (have_prot != 0) {
            /* Guard pages are nice for bug detection but are not essential. */
            (void)qemu_mprotect_none(end, page_size);
        }
    }

    tcg_region_trees_init();

    /*
     * Leave the initial context initialized to the first region.
     * This will be the context into which we generate the prologue.
     * It is also the only context the initial softmmu target will use.
     */
    tcg_region_initial_alloc__locked(&tcg_init_ctx);
}

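/*
 * Called once the prologue has been generated at the start of the buffer:
 * shrink region 0 so that it begins right after the prologue, and register
 * the remainder of the buffer with the JIT debug interface.
 */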
void tcg_region_prologue_set(TCGContext *s)
{
    /* Deduct the prologue from the first region.  */
    g_assert(region.start_aligned == s->code_gen_buffer);
    region.after_prologue = s->code_ptr;

    /* Recompute boundaries of the first region. */
    tcg_region_assign(s, 0);

    /* Register the balance of the buffer with gdb. */
    tcg_register_jit(tcg_splitwx_to_rx(region.after_prologue),
                     region.start_aligned + region.total_size -
                     region.after_prologue);
}

/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        size_t size;

        size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}

/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including
 * all regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.total_size;
    capacity -= (region.n - 1) * guard_size;
    capacity -= region.n * TCG_HIGHWATER;

    return capacity;
}