// This is a version of dlmalloc.c ported to V. You can find the original
// source at ftp://g.oswego.edu/pub/misc/malloc.c
//
// The original source was written by Doug Lea and released to the public domain.
//
// # Why dlmalloc?
//
// This library does not rely on C code. Its primary purpose is for use in the
// freestanding build mode and for the WASM target.
//
// dlmalloc is not the most performant allocator. Its main purpose is to be
// easily portable and easy to learn. This is a straight port of the C and
// dlmalloc-rs versions of dlmalloc.
module dlmalloc

import math.bits

/*
$if debug ? {
	#include "valgrind.h"
}

fn C.VALGRIND_MALLOCLIKE_BLOCK(addr voidptr, size usize, rzb usize, is_zeroed bool)
fn C.VALGRIND_FREELIKE_BLOCK(addr voidptr, rzB usize)
fn C.VALGRIND_MAKE_MEM_UNDEFINED(addr voidptr, size usize)
*/

pub const (
	n_small_bins           = 32
	n_tree_bins            = 32
	small_bin_shift        = 3
	tree_bin_shift         = 8

	max_release_check_rate = 4095
)

fn usize_leading_zeros(x usize) usize {
	if sizeof(usize) == 8 {
		return usize(bits.leading_zeros_64(u64(x)))
	} else {
		return usize(bits.leading_zeros_32(u32(x)))
	}
}

[inline]
fn default_granularity() usize {
	return 64 * 1024
}

[inline]
fn default_trim_threshold() usize {
	return 2 * 1024 * 1024
}

[inline]
fn malloc_alignment() usize {
	return sizeof(usize) * 2
}

[inline]
fn chunk_overhead() usize {
	return sizeof(usize)
}

[inline]
fn min_large_size() usize {
	return 1 << dlmalloc.tree_bin_shift
}

[inline]
fn mmap_chunk_overhead() usize {
	return 2 * sizeof(usize)
}

[inline]
fn max_small_size() usize {
	return min_large_size() - 1
}

[inline]
fn max_small_request() usize {
	return max_small_size() - (malloc_alignment() - 1) - chunk_overhead()
}

[inline]
fn min_chunk_size() usize {
	return align_up(sizeof(Chunk), malloc_alignment())
}

[inline]
fn chunk_mem_offset() usize {
	return 2 * sizeof(usize)
}

[inline]
fn min_request() usize {
	return min_chunk_size() - chunk_overhead() - 1
}

[inline]
fn top_foot_size() usize {
	return align_offset_usize(chunk_mem_offset()) + pad_request(sizeof(Segment)) + min_chunk_size()
}

[inline]
fn max_request() usize {
	return calc_max_request()
}

[inline]
fn mmap_foot_pad() usize {
	return 4 * sizeof(usize)
}

fn min_sys_alloc_space() usize {
	return ((~0 - (default_granularity() + top_foot_size() + malloc_alignment()) +
		1) & ~malloc_alignment()) - chunk_overhead() + 1
}

fn calc_max_request() usize {
	x := min_sys_alloc_space()
	y := (~min_chunk_size() + 1) << 2

	if x < y {
		return x
	} else {
		return y
	}
}

fn pad_request(amt usize) usize {
	return align_up(amt + chunk_overhead(), malloc_alignment())
}

fn align_offset_usize(addr usize) usize {
	return align_up(addr, malloc_alignment()) - addr
}

fn is_aligned(a usize) bool {
	return a & (malloc_alignment() - 1) == 0
}

fn is_small(s usize) bool {
	return s >> dlmalloc.small_bin_shift < dlmalloc.n_small_bins
}

fn small_index2size(idx u32) usize {
	return usize(idx) << dlmalloc.small_bin_shift
}

fn small_index(size usize) u32 {
	return u32(size >> dlmalloc.small_bin_shift)
}
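
// A worked example of the small-bin size mapping (values assume
// small_bin_shift == 3, as defined above): the small bins are spaced 8 bytes
// apart, and every chunk in a given small bin has exactly the same size.
//
//	assert small_index(24) == 3
//	assert small_index2size(3) == 24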

fn align_up(a usize, alignment usize) usize {
	if a % alignment == 0 {
		return a
	} else {
		return a - (a % alignment) + alignment
	}
}
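
// For example, align_up(13, 8) == 16 and align_up(16, 8) == 16. Unlike the
// usual mask-based trick, this modulo version works for any non-zero
// alignment, not just powers of two.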

fn left_bits(x u32) u32 {
	return (x << 1) | (~(x << 1) + 1)
}

fn least_bit(x u32) u32 {
	return x & (~x + 1)
}
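
// Bit-trick examples: least_bit isolates the lowest set bit of x, while
// left_bits yields a mask of all bit positions strictly above the lowest set
// bit of x:
//
//	assert least_bit(0b10100) == 0b00100
//	assert left_bits(0b00100) == 0xfffffff8 // bits 3..31 set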

fn leftshift_for_tree_index(x u32) u32 {
	y := usize(x)
	if y == dlmalloc.n_tree_bins - 1 {
		return 0
	} else {
		return u32(sizeof(usize) * 8 - 1 - ((y >> 1) + dlmalloc.tree_bin_shift - 2))
	}
}

[unsafe]
fn align_as_chunk(ptr_ voidptr) &Chunk {
	ptr := usize(ptr_)
	chunk := ptr + chunk_mem_offset()
	return &Chunk(ptr + align_offset_usize(chunk))
}

fn request_2_size(req usize) usize {
	if req < min_request() {
		return min_chunk_size()
	} else {
		return pad_request(req)
	}
}
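
// A worked example of the request padding (values assume a 64-bit target,
// where chunk_overhead() == 8, malloc_alignment() == 16 and
// min_chunk_size() == 32):
//
//	assert request_2_size(1) == 32 // below min_request, clamped up
//	assert request_2_size(100) == 112 // align_up(100 + 8, 16)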

fn overhead_for(c &Chunk) usize {
	if c.mmapped() {
		return mmap_chunk_overhead()
	} else {
		return chunk_overhead()
	}
}

// In order for dlmalloc to efficiently manage memory, it needs a way to communicate
// with the underlying platform. This `Allocator` type provides an interface for that
// communication.
//
// Why not an `interface`? Interfaces require memory allocation, so it is simpler to
// pass a struct.
pub struct Allocator {
	alloc            fn (voidptr, usize) (voidptr, usize, u32) = unsafe { nil }
	remap            fn (voidptr, voidptr, usize, usize, bool) voidptr = unsafe { nil }
	free_part        fn (voidptr, voidptr, usize, usize) bool = unsafe { nil }
	free_            fn (voidptr, voidptr, usize) bool = unsafe { nil }
	can_release_part fn (voidptr, u32) bool = unsafe { nil }
	allocates_zeros  fn (voidptr) bool = unsafe { nil }
	page_size        fn (voidptr) usize = unsafe { nil } // not a constant field because some platforms might have different page sizes depending on configs
	data             voidptr
}
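
// A minimal sketch of wiring up a platform backend, assuming hypothetical
// `os_reserve`/`os_release` syscall wrappers (not part of this module). Every
// callback receives the `data` pointer as its first argument:
/*
fn backend_alloc(data voidptr, size usize) (voidptr, usize, u32) {
	ptr := os_reserve(size) // hypothetical: returns nil on failure
	return ptr, size, 0
}

fn wasm_allocator() Allocator {
	return Allocator{
		alloc: backend_alloc
		free_: fn (data voidptr, ptr voidptr, size usize) bool {
			return os_release(ptr, size) // hypothetical
		}
		can_release_part: fn (data voidptr, flags u32) bool {
			return false // e.g. WASM memory can only grow
		}
		allocates_zeros: fn (data voidptr) bool {
			return true
		}
		page_size: fn (data voidptr) usize {
			return 64 * 1024
		}
		data: unsafe { nil }
	}
}
*/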

pub struct Dlmalloc {
	system_allocator Allocator
	max_request      usize = 4294901657
mut:
	// bin maps
	smallmap u32 // bin map for small bins
	treemap  u32 // bin map for tree bins

	smallbins      [66]&Chunk // small bins, actually (n_small_bins + 1) * 2 entries
	treebins       [n_tree_bins]&TreeChunk
	dvsize         usize
	topsize        usize
	dv             &Chunk = unsafe { nil }
	top            &Chunk = unsafe { nil }
	footprint      usize
	max_footprint  usize
	seg            Segment
	trim_check     u32
	least_addr     voidptr
	release_checks usize
}

pub fn new(system_allocator Allocator) Dlmalloc {
	return Dlmalloc{
		smallmap: 0
		treemap: 0
		smallbins: unsafe { [(dlmalloc.n_small_bins + 1) * 2]&Chunk{} }
		treebins: unsafe { [dlmalloc.n_tree_bins]&TreeChunk{} }
		dvsize: 0
		topsize: 0
		dv: unsafe { nil }
		top: unsafe { nil }
		footprint: 0
		max_footprint: 0
		seg: Segment{unsafe { nil }, 0, unsafe { nil }, 0}
		trim_check: 0
		least_addr: unsafe { nil }
		release_checks: 0
		system_allocator: system_allocator
		max_request: 4294901657
	}
}
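
// A usage sketch from user code, assuming the hypothetical `wasm_allocator()`
// from the comment above:
/*
mut dl := dlmalloc.new(wasm_allocator())
ptr := unsafe { dl.malloc(128) }
if !isnil(ptr) {
	unsafe { dl.free_(ptr) }
}
*/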

[heap]
struct Chunk {
mut:
	prev_foot usize
	head      usize
	prev      &Chunk = unsafe { nil }
	next      &Chunk = unsafe { nil }
}

[heap]
struct Segment {
mut:
	base  voidptr
	size  usize
	next  &Segment = unsafe { nil }
	flags u32
}

[heap]
struct TreeChunk {
mut:
	chunk  Chunk
	child  [2]voidptr
	parent voidptr
	index  u32
}

const (
	pinuse    = 1 << 0
	cinuse    = 1 << 1
	flag4     = 1 << 2
	inuse     = pinuse | cinuse
	flag_bits = pinuse | cinuse | flag4
)
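
// A chunk's `head` word packs its size (always a multiple of
// malloc_alignment()) together with these low flag bits. For example, a
// 64-byte chunk that is in use and whose predecessor is also in use has
// head == 64 | pinuse | cinuse == 0x43, and size() recovers 64 by masking off
// flag_bits.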

fn fencepost_head() usize {
	return dlmalloc.inuse | sizeof(usize)
}

fn (c &Chunk) size() usize {
	return c.head & ~dlmalloc.flag_bits
}

fn (c &Chunk) mmapped() bool {
	return c.head & dlmalloc.inuse == 0
}

fn (c &Chunk) next() &Chunk {
	mut me := usize(c)
	me = me + c.size()
	return &Chunk(me)
}

fn (c &Chunk) prev() &Chunk {
	mut me := usize(c)
	me = me + c.prev_foot
	return &Chunk(me)
}

fn (c &Chunk) cinuse() bool {
	return c.head & dlmalloc.cinuse != 0
}

fn (c &Chunk) pinuse() bool {
	return c.head & dlmalloc.pinuse != 0
}

fn (mut c Chunk) clear_pinuse() {
	c.head &= ~dlmalloc.pinuse
}

fn (c &Chunk) inuse() bool {
	return c.head & dlmalloc.inuse != dlmalloc.pinuse
}

fn (mut c Chunk) set_inuse(size usize) {
	c.head = (c.head & dlmalloc.pinuse) | size | dlmalloc.cinuse
	mut next := c.plus_offset(size)
	next.head |= dlmalloc.pinuse
}

fn (mut c Chunk) set_inuse_and_pinuse(size usize) {
	c.head = dlmalloc.pinuse | size | dlmalloc.cinuse
	mut next := c.plus_offset(size)
	next.head |= dlmalloc.pinuse
}

fn (mut c Chunk) set_size_and_pinuse_of_inuse_chunk(size usize) {
	c.head = size | dlmalloc.pinuse | dlmalloc.cinuse
}

fn (mut c Chunk) set_size_and_pinuse_of_free_chunk(size usize) {
	c.head = size | dlmalloc.pinuse
	c.set_foot(size)
}

fn (mut c Chunk) set_free_with_pinuse(size usize, n_ &Chunk) {
	mut n := unsafe { n_ }
	n.clear_pinuse()
	c.set_size_and_pinuse_of_free_chunk(size)
}

fn (c &Chunk) set_foot(size usize) {
	mut next := c.plus_offset(size)
	next.prev_foot = size
}

fn (c &Chunk) plus_offset(offset usize) &Chunk {
	return &Chunk(usize(c) + offset)
}

fn (c &Chunk) minus_offset(offset usize) &Chunk {
	return &Chunk(usize(c) - offset)
}

fn (c &Chunk) to_mem() voidptr {
	return voidptr(usize(c) + chunk_mem_offset())
}

fn chunk_from_mem(mem_ voidptr) &Chunk {
	mem := usize(mem_)
	return &Chunk(mem - chunk_mem_offset())
}

fn (tree &TreeChunk) leftmost_child() &TreeChunk {
	left := unsafe { &TreeChunk(tree.child[0]) }
	if isnil(left) {
		return unsafe { &TreeChunk(tree.child[1]) }
	} else {
		return left
	}
}

fn (tree &TreeChunk) chunk() &Chunk {
	return &tree.chunk
}

fn (tree &TreeChunk) size(treemap u32) usize {
	return tree.chunk.head & ~dlmalloc.flag_bits
}

[unsafe]
fn (tree &TreeChunk) next() &TreeChunk {
	unsafe {
		return &TreeChunk(tree.chunk().next)
	}
}

[unsafe]
fn (tree &TreeChunk) prev() &TreeChunk {
	unsafe {
		return &TreeChunk(tree.chunk().prev)
	}
}

const extern = 1 << 0

fn (seg &Segment) is_extern() bool {
	return seg.flags & dlmalloc.extern != 0
}

fn (seg &Segment) can_release_part(sys_alloc &Allocator) bool {
	return sys_alloc.can_release_part(sys_alloc.data, seg.flags >> 1)
}

fn (seg &Segment) sys_flags() u32 {
	return seg.flags >> 1
}

fn (seg &Segment) holds(addr voidptr) bool {
	return seg.base <= addr && addr < seg.top()
}

fn (seg &Segment) top() voidptr {
	return voidptr(usize(seg.base) + seg.size)
}

[unsafe]
pub fn (dl &Dlmalloc) calloc_must_clear(ptr voidptr) bool {
	return !dl.system_allocator.allocates_zeros(dl.system_allocator.data)
		|| !chunk_from_mem(ptr).mmapped()
}

[unsafe]
fn (mut dl Dlmalloc) smallbin_at(idx u32) &Chunk {
	unsafe {
		return &Chunk(&dl.smallbins[idx * 2])
	}
}

[unsafe]
fn (mut dl Dlmalloc) treebin_at(idx u32) &&TreeChunk {
	return &dl.treebins[idx]
}

fn (dl &Dlmalloc) compute_tree_index(size usize) u32 {
	x := size >> dlmalloc.tree_bin_shift
	if x == 0 {
		return 0
	} else if x > 0xffff {
		return dlmalloc.n_tree_bins - 1
	} else {
		k := sizeof(usize) * 8 - 1 - usize_leading_zeros(x)
		return u32((k << 1) + ((size >> (k + dlmalloc.tree_bin_shift - 1)) & 1))
	}
}
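
// A worked example of the tree-bin mapping: for size == 768, x == 768 >> 8
// == 3, so k == 1 (the index of x's highest set bit), and the bin index is
// (1 << 1) + ((768 >> 8) & 1) == 3. Each pair of tree bins covers one
// power-of-two size range, with the extra low bit selecting the lower or
// upper half of that range.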

[unsafe]
fn (mut dl Dlmalloc) unlink_chunk(chunk &Chunk, size usize) {
	unsafe {
		if is_small(size) {
			dl.unlink_small_chunk(chunk, size)
		} else {
			dl.unlink_large_chunk(&TreeChunk(chunk))
		}
	}
}

[unsafe]
fn (mut dl Dlmalloc) unlink_small_chunk(chunk_ &Chunk, size usize) {
	mut chunk := unsafe { chunk_ }
	mut f := chunk.prev
	mut b := chunk.next
	idx := small_index(size)

	if voidptr(b) == voidptr(f) {
		unsafe { dl.clear_smallmap(idx) }
	} else {
		f.next = b
		b.prev = f
	}
}

[unsafe]
fn (mut dl Dlmalloc) unlink_large_chunk(chunk_ &TreeChunk) {
	unsafe {
		mut chunk := chunk_
		mut xp := &TreeChunk(chunk.parent)
		mut r := &TreeChunk(nil)
		if voidptr(chunk.next()) != voidptr(chunk) {
			mut f := chunk.prev()
			r = chunk.next()
			f.chunk.next = r.chunk()
			r.chunk.prev = f.chunk()
		} else {
			mut rp := &&TreeChunk(&chunk.child[1])
			if isnil(*rp) {
				rp = &&TreeChunk(&chunk.child[0])
			}

			r = *rp
			if !isnil(*rp) {
				for {
					mut cp := &&TreeChunk(&rp.child[1])
					if isnil(*cp) {
						cp = &&TreeChunk(&rp.child[0])
					}
					if isnil(*cp) {
						break
					}
					rp = cp
				}
				r = *rp
				*rp = &TreeChunk(nil)
			}
		}

		if isnil(xp) {
			return
		}

		mut h := dl.treebin_at(chunk.index)
		if voidptr(chunk) == voidptr(*h) {
			*h = r
			if isnil(r) {
				dl.clear_treemap(chunk.index)
			}
		} else {
			if xp.child[0] == chunk {
				xp.child[0] = r
			} else {
				xp.child[1] = r
			}
		}

		if !isnil(r) {
			r.parent = xp
			mut c0 := &TreeChunk(chunk.child[0])
			if !isnil(c0) {
				r.child[0] = c0
				c0.parent = r
			}
			mut c1 := &TreeChunk(chunk.child[1])
			if !isnil(c1) {
				r.child[1] = c1
				c1.parent = r
			}
		}
	}
}

[unsafe]
fn (mut dl Dlmalloc) unlink_first_small_chunk(head_ &Chunk, next_ &Chunk, idx u32) {
	mut next := unsafe { next_ }
	mut head := unsafe { head_ }
	mut ptr := next.prev
	if voidptr(head) == voidptr(ptr) {
		unsafe { dl.clear_smallmap(idx) }
	} else {
		ptr.next = head
		head.prev = ptr
	}
}

// calloc is the same as `malloc`, except if the allocation succeeds it's guaranteed
// to point to `size` bytes of zeros.
[unsafe]
pub fn (mut dl Dlmalloc) calloc(size usize) voidptr {
	unsafe {
		ptr := dl.malloc(size)
		if !isnil(ptr) && dl.calloc_must_clear(ptr) {
			vmemset(ptr, 0, int(size))
		}
		return ptr
	}
}

// free_ behaves as libc free, but operates within the given space
[unsafe]
pub fn (mut dl Dlmalloc) free_(mem voidptr) {
	unsafe {
		// C.VALGRIND_FREELIKE_BLOCK(mem, 0)
		mut p := chunk_from_mem(mem)

		mut psize := p.size()

		next := p.plus_offset(psize)

		if !p.pinuse() {
			prevsize := p.prev_foot

			if p.mmapped() {
				psize += prevsize + mmap_foot_pad()
				if dl.system_allocator.free_(dl.system_allocator.data, voidptr(usize(p) - prevsize),
					psize)
				{
					dl.footprint -= psize
				}

				return
			}

			prev := p.minus_offset(prevsize)
			psize += prevsize
			p = prev
			if voidptr(p) != voidptr(dl.dv) {
				dl.unlink_chunk(p, prevsize)
			} else if (next.head & dlmalloc.inuse) == dlmalloc.inuse {
				dl.dvsize = psize
				p.set_free_with_pinuse(psize, next)

				return
			}
		}
		// consolidate forward if we can
		if !next.cinuse() {
			if voidptr(next) == voidptr(dl.top) {
				dl.topsize += psize
				p.head = 0

				tsize := dl.topsize
				dl.top = p
				p.head = tsize | dlmalloc.pinuse
				if voidptr(p) == voidptr(dl.dv) {
					dl.dv = nil
					dl.dvsize = 0
				}

				if dl.should_trim(tsize) {
					dl.sys_trim(0)
				}

				return
			} else if voidptr(next) == voidptr(dl.dv) {
				dl.dvsize += psize
				dsize := dl.dvsize
				dl.dv = p
				p.set_size_and_pinuse_of_free_chunk(dsize)

				return
			} else {
				nsize := next.size()
				psize += nsize
				dl.unlink_chunk(next, nsize)
				p.set_size_and_pinuse_of_free_chunk(psize)

				if voidptr(p) == voidptr(dl.dv) {
					dl.dvsize = psize
					return
				}
			}
		} else {
			p.set_free_with_pinuse(psize, next)
		}

		if is_small(psize) {
			dl.insert_small_chunk(p, psize)
		} else {
			dl.insert_large_chunk(&TreeChunk(p), psize)
			dl.release_checks -= 1
			if dl.release_checks == 0 {
				dl.release_unused_segments()
			}
		}
	}
}

fn (dl Dlmalloc) should_trim(size usize) bool {
	return size > dl.trim_check
}

[unsafe]
fn (mut dl Dlmalloc) sys_trim(pad_ usize) bool {
	unsafe {
		mut pad := pad_
		mut released := usize(0)
		if pad < dl.max_request && !isnil(dl.top) {
			pad += top_foot_size()
			if dl.topsize > pad {
				unit := usize(default_granularity())
				extra := ((dl.topsize - pad + unit - 1) / unit - 1) * unit
				mut sp := dl.segment_holding(dl.top)

				if !sp.is_extern() {
					if sp.can_release_part(&dl.system_allocator) {
						if sp.size >= extra && !dl.has_segment_link(sp) {
							newsize := sp.size - extra
							if dl.system_allocator.free_part(dl.system_allocator.data,
								sp.base, sp.size, newsize)
							{
								released = extra
							}
						}
					}
				}

				if released != 0 {
					sp.size -= released
					dl.footprint -= released
					top := dl.top
					topsize := dl.topsize - released
					dl.init_top(top, topsize)
				}
			}

			released += dl.release_unused_segments()

			if released == 0 && dl.topsize > dl.trim_check {
				dl.trim_check = 1 << 31
			}
		}
		return released != 0
	}
}

[unsafe]
fn (mut dl Dlmalloc) release_unused_segments() usize {
	unsafe {
		mut released := usize(0)
		mut nsegs := usize(0)
		mut pred := &dl.seg
		mut sp := pred.next
		for !isnil(sp) {
			base := sp.base
			size := sp.size
			next := sp.next

			nsegs += 1

			if sp.can_release_part(&dl.system_allocator) && !sp.is_extern() {
				mut p := align_as_chunk(base)
				psize := p.size()
				chunk_top := voidptr(usize(p) + psize)
				top := voidptr(usize(base) + (size - top_foot_size()))
				if !p.inuse() && chunk_top >= top {
					mut tp := &TreeChunk(p)
					if voidptr(p) == voidptr(dl.dv) {
						dl.dv = nil
						dl.dvsize = 0
					} else {
						dl.unlink_large_chunk(tp)
					}

					if dl.system_allocator.free_(dl.system_allocator.data, base, size) {
						released += size
						dl.footprint -= size
						sp = pred
						sp.next = next
					} else {
						// back out if we can't unmap
						dl.insert_large_chunk(tp, psize)
					}
				}
			}
			pred = sp
			sp = next
		}
		dl.release_checks = if nsegs > dlmalloc.max_release_check_rate {
			nsegs
		} else {
			dlmalloc.max_release_check_rate
		}
		return released
	}
}

[unsafe]
fn (dl &Dlmalloc) has_segment_link(ptr &Segment) bool {
	mut sp := &dl.seg
	for !isnil(sp) {
		if ptr.holds(sp) {
			return true
		}
		sp = sp.next
	}
	return false
}

[unsafe]
fn (mut dl Dlmalloc) replace_dv(chunk &Chunk, size usize) {
	dvs := dl.dvsize
	if dvs != 0 {
		dv := dl.dv
		unsafe {
			dl.insert_small_chunk(dv, dvs)
		}
	}
	dl.dvsize = size
	dl.dv = chunk
}

[unsafe]
fn (mut dl Dlmalloc) insert_chunk(chunk &Chunk, size usize) {
	unsafe {
		if is_small(size) {
			dl.insert_small_chunk(chunk, size)
		} else {
			dl.insert_large_chunk(&TreeChunk(chunk), size)
		}
	}
}

[unsafe]
fn (mut dl Dlmalloc) insert_small_chunk(chunk_ &Chunk, size usize) {
	mut chunk := unsafe { chunk_ }
	idx := small_index(size)
	unsafe {
		mut head := dl.smallbin_at(idx)
		mut f := head
		if !dl.smallmap_is_marked(idx) {
			dl.mark_smallmap(idx)
		} else {
			f = head.prev
		}

		assert !isnil(f)
		assert !isnil(head)
		head.prev = chunk
		f.next = chunk
		chunk.prev = f
		chunk.next = head
	}
}

[unsafe]
fn (mut dl Dlmalloc) insert_large_chunk(chunk_ &TreeChunk, size usize) {
	unsafe {
		mut chunk := chunk_
		idx := dl.compute_tree_index(size)
		mut h := dl.treebin_at(idx)

		chunk.index = idx
		chunk.child[0] = nil
		chunk.child[1] = nil

		mut chunkc := chunk.chunk()
		if !dl.treemap_is_marked(idx) {
			dl.mark_treemap(idx)
			*h = chunk
			chunk.parent = voidptr(h)
			assert !isnil(chunkc)
			chunkc.prev = chunkc
			chunkc.next = chunkc
		} else {
			mut t := *h
			mut k := size << leftshift_for_tree_index(idx)
			for {
				if t.chunk().size() != size {
					c_ := &t.child[(k >> (sizeof(usize) * 8 - 1)) & 1]
					mut c := &&TreeChunk(c_)
					k <<= 1
					if !isnil(c) {
						t = *c
					} else {
						*c = chunk
						chunk.parent = t
						chunkc.next = chunkc
						chunkc.prev = chunkc
						break
					}
				} else {
					tc := t.chunk()
					f := tc.prev
					f.next = chunkc
					assert !isnil(chunkc)
					tc.prev = chunkc
					chunkc.prev = f
					chunkc.next = tc
					chunk.parent = nil
					break
				}
			}
		}
	}
}

[unsafe]
fn (mut dl Dlmalloc) clear_smallmap(idx u32) {
	dl.smallmap &= ~(1 << idx)
}

[unsafe]
fn (mut dl Dlmalloc) mark_smallmap(idx u32) {
	dl.smallmap |= 1 << idx
}

[unsafe]
fn (mut dl Dlmalloc) smallmap_is_marked(idx u32) bool {
	return (dl.smallmap & (1 << idx)) != 0
}

[unsafe]
fn (mut dl Dlmalloc) clear_treemap(idx u32) {
	dl.treemap &= ~(1 << idx)
}

[unsafe]
fn (mut dl Dlmalloc) mark_treemap(idx u32) {
	dl.treemap |= 1 << idx
}

[unsafe]
fn (mut dl Dlmalloc) treemap_is_marked(idx u32) bool {
	return dl.treemap & (1 << idx) != 0
}
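
// The smallmap/treemap words act as occupancy bitmaps: bit `idx` is set iff
// bin `idx` is non-empty, so "find the first usable bin at or above idx"
// becomes a mask plus a count-trailing-zeros, as in tmalloc_large below:
//
//	leftbits := left_bits(1 << idx) & dl.treemap // non-empty bins above idx
//	i := u32(bits.trailing_zeros_32(least_bit(leftbits)))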

pub fn (mut dl Dlmalloc) malloc(size usize) voidptr {
	unsafe {
		p := dl.malloc_real(size)
		if !isnil(p) {
			// C.VALGRIND_MALLOCLIKE_BLOCK(p, size, 0, false)
		}
		return p
	}
}

// malloc behaves as libc malloc, but operates within the given space
[unsafe]
fn (mut dl Dlmalloc) malloc_real(size usize) voidptr {
	mut nb := usize(0)
	unsafe {
		if size <= max_small_request() {
			nb = request_2_size(size)
			mut idx := small_index(nb)
			smallbits := dl.smallmap >> idx
			if smallbits & 0b11 != 0 {
				idx += ~smallbits & 1

				b := dl.smallbin_at(idx)
				mut p := b.prev
				smallsize := small_index2size(idx)

				dl.unlink_first_small_chunk(b, p, idx)

				p.set_inuse_and_pinuse(smallsize)

				ret := p.to_mem()

				return ret
			}

			if nb > dl.dvsize {
				// if there's some other bin with some memory, then we just use
				// the next smallest bin

				// todo(playXE): Find out why in the world this part of the code does not
				// work in some programs (esp. x.json2). Theoretically, disabling this path
				// just makes fragmentation a little worse, but nothing really bad should
				// happen
				if false && smallbits != 0 {
					leftbits := (smallbits << idx) & left_bits(1 << idx)
					leastbit := least_bit(leftbits)
					i := u32(bits.trailing_zeros_32(leastbit))
					mut b := dl.smallbin_at(i)
					mut p := b.prev
					dl.unlink_first_small_chunk(b, p, i)
					smallsize := small_index2size(i)
					rsize := smallsize - nb
					if sizeof(usize) != 4 && rsize < min_chunk_size() {
						p.set_inuse_and_pinuse(smallsize)
					} else {
						p.set_size_and_pinuse_of_inuse_chunk(nb)
						mut r := p.plus_offset(nb)
						r.set_size_and_pinuse_of_free_chunk(rsize)
						dl.replace_dv(r, rsize)
					}

					ret := p.to_mem()

					return ret
				} else if dl.treemap != 0 {
					mem := dl.tmalloc_small(nb)
					if !isnil(mem) {
						return mem
					}
				}
			}
		} else if size >= dl.max_request {
			return nil
		} else {
			nb = pad_request(size)
			if dl.treemap != 0 {
				mem := dl.tmalloc_large(nb)
				if !isnil(mem) {
					return mem
				}
			}
		}

		// use the `dv` node if we can, splitting it if necessary or otherwise
		// exhausting the entire chunk
		if nb <= dl.dvsize {
			rsize := dl.dvsize - nb
			mut p := dl.dv
			if rsize >= min_chunk_size() {
				dl.dv = p.plus_offset(nb)
				dl.dvsize = rsize
				mut r := dl.dv
				r.set_size_and_pinuse_of_free_chunk(rsize)
				p.set_size_and_pinuse_of_inuse_chunk(nb)
			} else {
				dvs := dl.dvsize
				dl.dvsize = 0
				dl.dv = nil
				p.set_inuse_and_pinuse(dvs)
			}
			ret := p.to_mem()

			return ret
		}
		// Split the top node if we can
		if nb < dl.topsize {
			dl.topsize -= nb
			rsize := dl.topsize
			mut p := dl.top
			dl.top = p.plus_offset(nb)
			mut r := dl.top
			r.head = rsize | dlmalloc.pinuse
			p.set_size_and_pinuse_of_inuse_chunk(nb)
			ret := p.to_mem()

			return ret
		}

		return dl.sys_alloc(nb)
	}
}

[unsafe]
fn (mut dl Dlmalloc) init_bins() {
	unsafe {
		for i in 0 .. dlmalloc.n_small_bins {
			mut bin := dl.smallbin_at(u32(i))
			bin.prev = bin
			bin.next = bin
		}
	}
}

[unsafe]
fn (mut dl Dlmalloc) init_top(ptr &Chunk, size_ usize) {
	offset := align_offset_usize(ptr.to_mem())
	mut p := ptr.plus_offset(offset)

	size := size_ - offset
	dl.top = p
	dl.topsize = size
	// C.VALGRIND_MAKE_MEM_UNDEFINED(p.plus_offset(sizeof(usize)), sizeof(usize))
	p.head = size | dlmalloc.pinuse
	// C.VALGRIND_MAKE_MEM_UNDEFINED(p.plus_offset(size + sizeof(usize)), sizeof(usize))
	p.plus_offset(size).head = top_foot_size()
	dl.trim_check = u32(default_trim_threshold())
}

[unsafe]
fn (mut dl Dlmalloc) sys_alloc(size usize) voidptr {
	page_size := dl.system_allocator.page_size(dl.system_allocator.data)
	asize := align_up(align_up(size + top_foot_size() + malloc_alignment(), default_granularity()),
		page_size)
	unsafe {
		alloc := dl.system_allocator.alloc
		tbase, mut tsize, flags := alloc(dl.system_allocator.data, asize)

		if isnil(tbase) {
			return tbase
		}

		dl.footprint += tsize
		dl.max_footprint = if dl.max_footprint > dl.footprint {
			dl.max_footprint
		} else {
			dl.footprint
		}
		if isnil(dl.top) {
			if isnil(dl.least_addr) || tbase < dl.least_addr {
				dl.least_addr = tbase
			}
			dl.seg.base = tbase
			dl.seg.size = tsize
			dl.seg.flags = flags
			dl.release_checks = dlmalloc.max_release_check_rate
			dl.init_bins()
			tsize_ := tsize - top_foot_size()
			dl.init_top(&Chunk(tbase), tsize_)
		} else {
			mut sp := &dl.seg
			for !isnil(sp) && voidptr(tbase) != voidptr(sp.top()) {
				sp = sp.next
			}

			if !isnil(sp) && !sp.is_extern() && sp.sys_flags() == flags && sp.holds(dl.top) {
				sp.size += tsize
				ptr := dl.top
				size_ := dl.topsize + tsize
				dl.init_top(ptr, size_)
			} else {
				if tbase < dl.least_addr {
					dl.least_addr = tbase
				}
				sp = &dl.seg
				for !isnil(sp) && sp.base != voidptr(usize(tbase) + tsize) {
					sp = sp.next
				}

				if !isnil(sp) && !sp.is_extern() && sp.sys_flags() == flags {
					oldbase := sp.base
					sp.base = tbase
					sp.size = tsize
					return dl.prepend_alloc(tbase, oldbase, size)
				} else {
					dl.add_segment(tbase, tsize, flags)
				}
			}
		}

		if size < dl.topsize {
			dl.topsize -= size
			rsize := dl.topsize
			mut p := dl.top
			dl.top = p.plus_offset(size)
			mut r := dl.top
			r.head = rsize | dlmalloc.pinuse
			p.set_size_and_pinuse_of_inuse_chunk(size)
			ret := p.to_mem()

			return ret
		}
	}
	return unsafe { nil }
}

[unsafe]
fn (mut dl Dlmalloc) tmalloc_small(size usize) voidptr {
	unsafe {
		leastbit := least_bit(dl.treemap)
		i := bits.trailing_zeros_32(leastbit)
		mut v := *dl.treebin_at(u32(i))
		mut t := v
		mut rsize := t.size(dl.treemap)
		for {
			t = t.leftmost_child()
			if isnil(t) {
				break
			}

			trem := t.chunk().size() - size
			if trem < rsize {
				rsize = trem
				v = t
			}
		}

		mut vc := v.chunk()
		r := &TreeChunk(vc.plus_offset(size))
		dl.unlink_large_chunk(v)
		if rsize < min_chunk_size() {
			vc.set_inuse_and_pinuse(rsize + size)
		} else {
			mut rc := r.chunk()
			vc.set_size_and_pinuse_of_inuse_chunk(size)
			rc.set_size_and_pinuse_of_free_chunk(rsize)
			dl.replace_dv(rc, rsize)
		}

		return vc.to_mem()
	}
}

[unsafe]
fn (mut dl Dlmalloc) tmalloc_large(size usize) voidptr {
	unsafe {
		mut v := &TreeChunk(nil)
		mut rsize := ~size + 1
		idx := dl.compute_tree_index(size)
		mut t := *dl.treebin_at(idx)
		if !isnil(t) {
			mut sizebits := size << leftshift_for_tree_index(idx)
			mut rst := voidptr(u64(0))
			for {
				csize := t.chunk().size()
				if csize >= size && csize - size < rsize {
					v = t
					rsize = csize - size
					if rsize == 0 {
						break
					}
				}

				rt := t.child[1]
				t = t.child[(sizebits >> (sizeof(usize) * 8 - 1)) & 1]
				if !isnil(rt) && voidptr(rt) != voidptr(t) {
					rst = rt
				}
				if isnil(t) {
					t = rst
					break
				}
				sizebits <<= 1
			}
		}

		if isnil(t) && isnil(v) {
			leftbits := left_bits(1 << idx) & dl.treemap
			if leftbits != 0 {
				leastbit := least_bit(leftbits)
				i := bits.trailing_zeros_32(leastbit)
				t = *dl.treebin_at(u32(i))
			}
		}
		// Find the smallest of this tree or subtree
		for !isnil(t) {
			csize := t.chunk().size()
			if csize >= size && csize - size < rsize {
				rsize = csize - size
				v = t
			}
			t = t.leftmost_child()
		}

		if isnil(v) || (dl.dvsize >= size && !(rsize < dl.dvsize - size)) {
			return nil
		}

		mut vc := v.chunk()
		mut r := vc.plus_offset(size)
		dl.unlink_large_chunk(v)
		if rsize < min_chunk_size() {
			vc.set_inuse_and_pinuse(rsize + size)
		} else {
			vc.set_size_and_pinuse_of_inuse_chunk(size)
			r.set_size_and_pinuse_of_free_chunk(rsize)
			dl.insert_chunk(r, rsize)
		}

		return vc.to_mem()
	}
}

[unsafe]
fn (mut dl Dlmalloc) prepend_alloc(newbase voidptr, oldbase voidptr, size usize) voidptr {
	unsafe {
		mut p := align_as_chunk(newbase)
		mut oldfirst := align_as_chunk(oldbase)
		psize := usize(oldfirst) - usize(p)
		mut q := p.plus_offset(size)
		mut qsize := psize - size
		// C.VALGRIND_MAKE_MEM_UNDEFINED(p.plus_offset(sizeof(usize)), size)
		p.set_size_and_pinuse_of_inuse_chunk(size)

		if qsize >= sizeof(TreeChunk) {
			// C.VALGRIND_MAKE_MEM_UNDEFINED(q, sizeof(TreeChunk))
		} else {
			// C.VALGRIND_MAKE_MEM_UNDEFINED(q, sizeof(Chunk))
		}
		// C.VALGRIND_MAKE_MEM_UNDEFINED(q.plus_offset(qsize), sizeof(usize))

		if voidptr(oldfirst) == voidptr(dl.top) {
			dl.topsize += qsize
			tsize := dl.topsize
			dl.top = q
			q.head = tsize | dlmalloc.pinuse
		} else if voidptr(oldfirst) == voidptr(dl.dv) {
			dl.dvsize += qsize
			dsize := dl.dvsize
			dl.dv = q
			q.set_size_and_pinuse_of_free_chunk(dsize)
		} else {
			if !oldfirst.inuse() {
				nsize := oldfirst.size()
				dl.unlink_chunk(oldfirst, nsize)
				oldfirst = oldfirst.plus_offset(nsize)
				qsize += nsize
			}
			q.set_free_with_pinuse(qsize, oldfirst)
			dl.insert_chunk(q, qsize)
		}

		ret := p.to_mem()
		return ret
	}
}

[unsafe]
fn (mut dl Dlmalloc) add_segment(tbase voidptr, tsize usize, flags u32) {
	// TODO: what in the world is this function doing????
	unsafe {
		old_top := dl.top
		mut oldsp := dl.segment_holding(old_top)
		old_end := oldsp.top()
		ssize := pad_request(sizeof(Segment))
		mut offset := ssize + sizeof(usize) * 4 + malloc_alignment() - 1
		rawsp := voidptr(usize(old_end) - offset)
		offset = align_offset_usize((&Chunk(rawsp)).to_mem())
		asp := voidptr(usize(rawsp) + offset)
		csp := if asp < voidptr(usize(old_top) + min_chunk_size()) { old_top } else { asp }
		mut sp := &Chunk(csp)
		mut ss := &Segment(sp.to_mem())
		mut tnext := sp.plus_offset(ssize)
		mut p := tnext
		mut nfences := 0

		size := tsize - top_foot_size()
		dl.init_top(&Chunk(tbase), size)

		sp.set_size_and_pinuse_of_inuse_chunk(ssize)
		*ss = dl.seg
		dl.seg.base = tbase
		dl.seg.size = tsize
		dl.seg.flags = flags
		dl.seg.next = ss

		for {
			nextp := p.plus_offset(sizeof(usize))
			p.head = fencepost_head()
			nfences += 1
			if voidptr(&nextp.head) < old_end {
				p = nextp
			} else {
				break
			}
		}
		// TODO: why 2?
		assert nfences >= 2
		if voidptr(csp) != voidptr(old_top) {
			mut q := &Chunk(old_top)
			psize := usize(csp) - usize(old_top)
			tn := q.plus_offset(psize)
			q.set_free_with_pinuse(psize, tn)

			dl.insert_chunk(q, psize)
		}
	}
}

[unsafe]
fn (mut dl Dlmalloc) segment_holding(ptr voidptr) &Segment {
	mut sp := &dl.seg
	for !isnil(sp) {
		if sp.base <= ptr && ptr < sp.top() {
			return sp
		}
		sp = sp.next
	}
	return &Segment(unsafe { nil })
}

// realloc behaves as libc realloc, but operates within the given space
[unsafe]
pub fn (mut dl Dlmalloc) realloc(oldmem voidptr, bytes usize) voidptr {
	if bytes >= dl.max_request {
		return unsafe { nil }
	}
	unsafe {
		nb := request_2_size(bytes)
		mut oldp := chunk_from_mem(oldmem)
		newp := dl.try_realloc_chunk(oldp, nb, true)
		if !isnil(newp) {
			return newp.to_mem()
		}

		ptr := dl.malloc(bytes)
		if !isnil(ptr) {
			oc := oldp.size() - overhead_for(oldp)
			copy_bytes := if oc < bytes { oc } else { bytes }
			vmemcpy(ptr, oldmem, int(copy_bytes))
		}

		return ptr
	}
}
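
// A usage sketch: growing an allocation, in place when try_realloc_chunk
// succeeds, assuming `dl` was set up as in the comment after `new`:
/*
mut buf := unsafe { dl.malloc(64) }
buf = unsafe { dl.realloc(buf, 256) } // may move; old contents are copied
*/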

// memalign allocates memory aligned to `alignment_`. Only call this with a
// power-of-two alignment greater than `malloc_alignment()`.
[unsafe]
pub fn (mut dl Dlmalloc) memalign(alignment_ usize, bytes usize) voidptr {
	mut alignment := alignment_
	if alignment < min_chunk_size() {
		alignment = min_chunk_size()
	}

	if bytes >= max_request() - alignment {
		return unsafe { nil }
	}
	unsafe {
		nb := request_2_size(bytes)
		req := nb + alignment + min_chunk_size() - chunk_overhead()
		mem := dl.malloc(req)
		if isnil(mem) {
			return mem
		}

		mut p := chunk_from_mem(mem)
		if usize(mem) & (alignment - 1) != 0 {
			// Here we find an aligned spot inside the chunk. Since we need to
			// give back leading space in a chunk of at least `min_chunk_size`,
			// if the first calculation places us at a spot with less than
			// `min_chunk_size` leader we can move to the next aligned spot.
			// We've allocated enough total room so that this is always possible.
			br_ := (usize(mem) + alignment - 1) & (~alignment + 1)
			br := chunk_from_mem(voidptr(br_))
			mut pos := voidptr(u64(0))
			if usize(br) - usize(p) > min_chunk_size() {
				pos = voidptr(br)
			} else {
				pos = voidptr(usize(br) + alignment)
			}

			mut newp := &Chunk(pos)
			leadsize := usize(pos) - usize(p)
			newsize := p.size() - leadsize

			if p.mmapped() {
				newp.prev_foot = p.prev_foot + leadsize
				newp.head = newsize
			} else {
				newp.set_inuse(newsize)
				p.set_inuse(leadsize)
				dl.dispose_chunk(p, leadsize)
			}
			p = newp
		}

		if !p.mmapped() {
			size := p.size()
			if size > nb + min_chunk_size() {
				remainder_size := size - nb
				mut remainder := p.plus_offset(nb)
				p.set_inuse(nb)
				remainder.set_inuse(remainder_size)
				dl.dispose_chunk(remainder, remainder_size)
			}
		}

		// C.VALGRIND_MALLOCLIKE_BLOCK(p.to_mem(), bytes, 0, false)
		return p.to_mem()
	}
}
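
// A usage sketch: requesting a 4 KiB-aligned block (the alignment must be a
// power of two):
/*
page := unsafe { dl.memalign(4096, 4096) }
assert usize(page) % 4096 == 0
*/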

[unsafe]
fn (mut dl Dlmalloc) try_realloc_chunk(p_ &Chunk, nb usize, can_move bool) &Chunk {
	unsafe {
		mut p := p_
		oldsize := p.size()
		mut next := p.plus_offset(oldsize)
		if p.mmapped() {
			return dl.mmap_resize(p, nb, can_move)
		} else if oldsize >= nb {
			rsize := oldsize - nb
			if rsize >= min_chunk_size() {
				mut r := p.plus_offset(nb)
				p.set_inuse(nb)
				r.set_inuse(rsize)
				dl.dispose_chunk(r, rsize)
			}
			return p
		} else if voidptr(next) == voidptr(dl.top) {
			if oldsize + dl.topsize <= nb {
				return nil
			}

			newsize := oldsize + dl.topsize
			newtopsize := newsize - nb
			mut newtop := p.plus_offset(nb)
			p.set_inuse(nb)
			newtop.head = newtopsize | dlmalloc.pinuse
			dl.top = newtop
			dl.topsize = newtopsize
			return p
		} else if voidptr(next) == voidptr(dl.dv) {
			dvs := dl.dvsize
			if oldsize + dvs < nb {
				return nil
			}

			dsize := oldsize + dvs - nb
			if dsize >= min_chunk_size() {
				mut r := p.plus_offset(nb)
				mut n := r.plus_offset(dsize)
				p.set_inuse(nb)
				r.set_size_and_pinuse_of_free_chunk(dsize)
				n.clear_pinuse()
				dl.dvsize = dsize
				dl.dv = r
			} else {
				newsize := oldsize + dvs
				p.set_inuse(newsize)
				dl.dvsize = 0
				dl.dv = nil
			}
			return p
		} else if !next.cinuse() {
			nextsize := next.size()
			if oldsize + nextsize < nb {
				return nil
			}
			rsize := oldsize + nextsize - nb
			dl.unlink_chunk(next, nextsize)
			if rsize < min_chunk_size() {
				newsize := oldsize + nextsize
				p.set_inuse(newsize)
			} else {
				r := p.plus_offset(nb)
				p.set_inuse(nb)
				r.set_inuse(rsize)
				dl.dispose_chunk(r, rsize)
			}
			return p
		} else {
			return nil
		}
	}
}

[unsafe]
fn (mut dl Dlmalloc) mmap_resize(oldp_ &Chunk, nb usize, can_move bool) &Chunk {
	mut oldp := unsafe { oldp_ }
	oldsize := oldp.size()
	if is_small(nb) {
		return unsafe { nil }
	}
	// Keep the old chunk if it's big enough but not too big
	if oldsize >= nb + sizeof(usize) && (oldsize - nb) <= (default_granularity() << 1) {
		return oldp
	}

	offset := oldp.prev_foot
	oldmmsize := oldsize + offset + mmap_foot_pad()
	newmmsize := dl.mmap_align(nb + 6 * sizeof(usize) + malloc_alignment() - 1)

	ptr := dl.system_allocator.remap(dl.system_allocator.data, voidptr(usize(oldp) - offset),
		oldmmsize, newmmsize, can_move)
	if isnil(ptr) {
		return unsafe { nil }
	}

	mut newp := unsafe { &Chunk(voidptr(usize(ptr) + offset)) }
	psize := newmmsize - offset - mmap_foot_pad()
	newp.head = psize
	newp.plus_offset(psize).head = fencepost_head()
	newp.plus_offset(psize + sizeof(usize)).head = 0
	if ptr < dl.least_addr {
		dl.least_addr = ptr
	}
	dl.footprint = dl.footprint + newmmsize - oldmmsize
	if dl.footprint > dl.max_footprint {
		dl.max_footprint = dl.footprint
	}
	return newp
}

fn (dl &Dlmalloc) mmap_align(a usize) usize {
	return align_up(a, dl.system_allocator.page_size(dl.system_allocator.data))
}

[unsafe]
fn (mut dl Dlmalloc) dispose_chunk(p_ &Chunk, psize_ usize) {
	mut p := unsafe { p_ }
	mut psize := psize_
	unsafe {
		mut next := p.plus_offset(psize)
		if !p.pinuse() {
			prevsize := p.prev_foot
			if p.mmapped() {
				psize += prevsize + mmap_foot_pad()

				if dl.system_allocator.free_(dl.system_allocator.data, voidptr(usize(p) - prevsize),
					psize)
				{
					dl.footprint -= psize
				}
				return
			}

			prev := p.minus_offset(prevsize)
			psize += prevsize
			p = prev
			if voidptr(p) != voidptr(dl.dv) {
				dl.unlink_chunk(p, prevsize)
			} else if next.head & dlmalloc.inuse == dlmalloc.inuse {
				dl.dvsize = psize
				p.set_free_with_pinuse(psize, next)
				return
			}
		}

		if !next.cinuse() {
			if voidptr(next) == voidptr(dl.top) {
				dl.topsize += psize
				tsize := dl.topsize
				dl.top = p
				p.head = tsize | dlmalloc.pinuse
				if voidptr(p) == voidptr(dl.dv) {
					dl.dv = nil
					dl.dvsize = 0
				}
				return
			} else if voidptr(next) == voidptr(dl.dv) {
				dl.dvsize += psize
				dvsize := dl.dvsize
				dl.dv = p
				p.set_size_and_pinuse_of_free_chunk(dvsize)
				return
			} else {
				nsize := next.size()
				psize += nsize
				dl.unlink_chunk(next, nsize)
				p.set_size_and_pinuse_of_free_chunk(psize)
				if voidptr(p) == voidptr(dl.dv) {
					dl.dvsize = psize
					return
				}
			}
		} else {
			p.set_free_with_pinuse(psize, next)
		}
		dl.insert_chunk(p, psize)
	}
}