1 | [has_globals] |
2 | module builtin |
3 | |
4 | // With -prealloc, V calls libc's malloc to get chunks, each at least 16MB |
5 | // in size, as needed. Once a chunk is available, all malloc() calls within |
6 | // V code, that can fit inside the chunk, will use it instead, each bumping a |
7 | // pointer, till the chunk is filled. Once a chunk is filled, a new chunk will |
8 | // be allocated by calling libc's malloc, and the process continues. |
9 | // Each new chunk has a pointer to the old one, and at the end of the program, |
10 | // the entire linked list of chunks is freed. |
// The goal of all this is to amortize the cost of calling libc's malloc,
// trading higher memory usage in a compiler (or any single threaded batch
// mode program) for a ~8-10% speed increase.
14 | // Note: `-prealloc` is NOT safe to be used for multithreaded programs! |
15 | |
// Size in bytes of each preallocated chunk (16MB). A single allocation
// larger than this gets its own chunk, sized to fit it exactly
// (see vmemory_block_new).
const prealloc_block_size = 16 * 1024 * 1024

// Head of the linked list of chunks; always points to the most recently
// allocated block. New blocks are prepended as chunks fill up.
__global g_memory_block &VMemoryBlock
// VMemoryBlock is one node in a singly linked list of preallocated chunks.
// Allocations are served by bumping `current` forward inside `start..start+cap`.
[heap]
struct VMemoryBlock {
mut:
	id        int // sequential block number; 0 for the first block
	cap       isize // total size in bytes of the chunk at `start`
	start     &u8 = 0 // beginning of the chunk, as returned by C.malloc
	previous  &VMemoryBlock = 0 // the previously filled block (0 for the first)
	remaining isize // bytes still unused in this chunk
	current   &u8 = 0 // bump pointer; the next allocation starts here
	mallocs   int // count of allocations served from this chunk
}
31 | |
// vmemory_block_new allocates a new VMemoryBlock header via C.calloc,
// links it to `prev` (pass nil for the very first block), and backs it with
// a fresh chunk of at least `prealloc_block_size` bytes — or `at_least`
// bytes, when a single larger allocation has to fit.
// NOTE(review): the results of C.calloc and C.malloc are not checked for
// null here; on OOM the field writes below would crash. Confirm this is an
// accepted tradeoff for -prealloc builds.
[unsafe]
fn vmemory_block_new(prev &VMemoryBlock, at_least isize) &VMemoryBlock {
	mut v := unsafe { &VMemoryBlock(C.calloc(1, sizeof(VMemoryBlock))) }
	// The first block keeps id == 0, already zeroed by C.calloc.
	if unsafe { prev != 0 } {
		v.id = prev.id + 1
	}

	v.previous = prev
	// Use the standard chunk size, unless the caller needs more in one go.
	block_size := if at_least < prealloc_block_size { prealloc_block_size } else { at_least }
	v.start = unsafe { C.malloc(block_size) }
	v.cap = block_size
	v.remaining = block_size
	v.current = v.start
	return v
}
47 | |
// vmemory_block_malloc returns a pointer to `n` fresh bytes from the head
// chunk, advancing its bump pointer. When the head chunk cannot fit `n`
// more bytes, a new chunk is allocated first and becomes the list head.
[unsafe]
fn vmemory_block_malloc(n isize) &u8 {
	unsafe {
		// Grow with a fresh chunk, when the request does not fit.
		if g_memory_block.remaining < n {
			g_memory_block = vmemory_block_new(g_memory_block, n)
		}
		// Serve the request from the bump pointer, then advance it.
		res := g_memory_block.current
		g_memory_block.current += n
		g_memory_block.remaining -= n
		g_memory_block.mallocs++
		return res
	}
}
62 | |
63 | ///////////////////////////////////////////////// |
64 | |
// prealloc_vinit sets up the very first preallocated chunk. It is called
// once, at program startup, when V code is compiled with -prealloc.
[unsafe]
fn prealloc_vinit() {
	unsafe {
		g_memory_block = vmemory_block_new(nil, prealloc_block_size)
		// Register the cleanup handler, so the whole chunk list is freed
		// on normal program exit; freestanding targets have no atexit.
		$if !freestanding {
			C.atexit(prealloc_vcleanup)
		}
	}
}
74 | |
// prealloc_vcleanup frees the entire linked list of preallocated chunks.
// It is registered with atexit() by prealloc_vinit. When compiled with
// `-d prealloc_stats`, it first prints per-chunk allocation statistics.
[unsafe]
fn prealloc_vcleanup() {
	$if prealloc_stats ? {
		// Note: we do 2 loops here, because string interpolation
		// in the first loop may still use g_memory_block
		// The second loop however should *not* allocate at all.
		mut nr_mallocs := i64(0)
		mut mb := g_memory_block
		for unsafe { mb != 0 } {
			nr_mallocs += mb.mallocs
			eprintln('> freeing mb.id: ${mb.id:3} | cap: ${mb.cap:7} | rem: ${mb.remaining:7} | start: ${voidptr(mb.start)} | current: ${voidptr(mb.current)} | diff: ${u64(mb.current) - u64(mb.start):7} bytes | mallocs: ${mb.mallocs}')
			mb = mb.previous
		}
		eprintln('> nr_mallocs: ${nr_mallocs}')
	}
	unsafe {
		for g_memory_block != 0 {
			C.free(g_memory_block.start)
			// Free the VMemoryBlock header itself too, not just its chunk;
			// the headers come from C.calloc in vmemory_block_new and were
			// previously leaked. Unlink before freeing the node.
			node := g_memory_block
			g_memory_block = g_memory_block.previous
			C.free(node)
		}
	}
}
97 | |
// prealloc_malloc is the -prealloc replacement for malloc: it serves `n`
// bytes from the preallocated chunk list instead of calling libc each time.
[unsafe]
fn prealloc_malloc(n isize) &u8 {
	unsafe {
		return vmemory_block_malloc(n)
	}
}
102 | |
// prealloc_realloc is the -prealloc replacement for realloc. The old
// region is neither reused nor freed (prealloc never frees individual
// allocations); a new region is bump-allocated, and the common prefix
// of the old data is copied into it.
[unsafe]
fn prealloc_realloc(old_data &u8, old_size isize, new_size isize) &u8 {
	// Only copy as many bytes as both the old and the new region can hold.
	copy_size := if new_size < old_size { new_size } else { old_size }
	new_ptr := unsafe { vmemory_block_malloc(new_size) }
	unsafe { C.memcpy(new_ptr, old_data, copy_size) }
	return new_ptr
}
110 | |
// prealloc_calloc is the -prealloc replacement for calloc: it bump-allocates
// `n` bytes from the chunk list and zeroes them before returning.
[unsafe]
fn prealloc_calloc(n isize) &u8 {
	res := unsafe { vmemory_block_malloc(n) }
	unsafe { C.memset(res, 0, n) }
	return res
}