v / vlib / builtin
Raw file | 777 loc (720 sloc) | 20.18 KB | Latest commit hash 6d223b9a2
1// Copyright (c) 2019-2023 Alexander Medvednikov. All rights reserved.
2// Use of this source code is governed by an MIT license
3// that can be found in the LICENSE file.
4module builtin
5
6/*
7This is a highly optimized hashmap implementation. It has several traits that
8in combination makes it very fast and memory efficient. Here is a short expl-
9anation of each trait. After reading this you should have a basic understand-
10ing of how it functions:
11
121. Hash-function: Wyhash. Wyhash is the fastest hash-function for short keys
13passing SMHasher, so it was an obvious choice.
14
152. Open addressing: Robin Hood Hashing. With this method, a hash-collision is
16resolved by probing. As opposed to linear probing, Robin Hood hashing has a
17simple but clever twist: As new keys are inserted, old keys are shifted arou-
18nd in a way such that all keys stay reasonably close to the slot they origin-
19ally hash to. A new key may displace a key already inserted if its probe cou-
20nt is larger than that of the key at the current position.
21
223. Memory layout: key-value pairs are stored in a `DenseArray`. This is a dy-
23namic array with a very low volume of unused memory, at the cost of more rea-
24llocations when inserting elements. It also preserves the order of the key-v-
25alues. This array is named `key_values`. Instead of probing a new key-value,
26this map probes two 32-bit numbers collectively. The first number has its 8
27most significant bits reserved for the probe-count and the remaining 24 bits
28are cached bits from the hash which are utilized for faster re-hashing. This
29number is often referred to as `meta`. The other 32-bit number is the index
30at which the key-value was pushed to in `key_values`. Both of these numbers
31are stored in a sparse array `metas`. The `meta`s and `kv_index`s are stored
32at even and odd indices, respectively:
33
34metas = [meta, kv_index, 0, 0, meta, kv_index, 0, 0, meta, kv_index, ...]
35key_values = [kv, kv, kv, ...]
36
374. The size of metas is a power of two. This enables the use of bitwise AND
38to convert the 64-bit hash to a bucket/index that doesn't overflow metas. If
39the size is power of two you can use "hash & (SIZE - 1)" instead of "hash %
40SIZE". Modulo is extremely expensive so using '&' is a big performance impro-
41vement. The general concern with this approach is that you only make use of
42the lower bits of the hash which can cause more collisions. This is solved by
43using a well-dispersed hash-function.
44
455. The hashmap keeps track of the highest probe_count. The trick is to alloc-
46ate `extra_metas` > max(probe_count), so you never have to do any bounds-che-
47cking since the extra meta memory ensures that a meta will never go beyond
48the last index.
49
506. Cached rehashing. When the `load_factor` of the map exceeds the `max_load_
51factor` the size of metas is doubled and all the key-values are "rehashed" to
52find the index for their meta's in the new array. Instead of rehashing compl-
53etely, it simply uses the cached-hashbits stored in the meta, resulting in
54much faster rehashing.
55*/
// Tuning constants for the hashmap implementation described above.
const (
	// Number of bits from the hash stored for each entry
	hashbits = 24
	// Number of bits from the hash stored for rehashing
	max_cached_hashbits = 16
	// Initial log-number of buckets in the hashtable
	// NOTE: "capicity" is a typo for "capacity"; the name is kept
	// because it is referenced throughout this module.
	init_log_capicity = 5
	// Initial number of buckets in the hashtable (2 ^ init_log_capicity)
	init_capicity = 1 << init_log_capicity
	// Maximum load-factor (len / capacity) before the table is doubled
	max_load_factor = 0.8
	// Initial highest even index in metas
	init_even_index = init_capicity - 2
	// Used for incrementing `extra_metas` when max
	// probe count is too high, to avoid overflow
	extra_metas_inc = 4
	// Bitmask to select all the hashbits (low 24 bits of a meta)
	hash_mask = u32(0x00FFFFFF)
	// Used for incrementing the probe-count (stored in the top 8 bits of a meta)
	probe_inc = u32(0x01000000)
)
77
// DenseArray represents a dynamic array with very low growth factor
struct DenseArray {
	key_bytes   int // size of one key in bytes
	value_bytes int // size of one value in bytes
mut:
	cap     int
	len     int
	deletes u32 // count of deleted (tombstoned) slots
	// array allocated (with `cap` bytes) on first deletion
	// has non-zero element when key deleted
	all_deleted &u8 = unsafe { nil }
	// parallel raw buffers: `cap * key_bytes` and `cap * value_bytes` bytes
	keys   &u8 = unsafe { nil }
	values &u8 = unsafe { nil }
}
92
// new_dense_array returns an empty DenseArray with room for 8 entries,
// ready to hold keys/values of the given byte sizes.
[inline]
fn new_dense_array(key_bytes int, value_bytes int) DenseArray {
	initial_cap := 8
	key_mem := unsafe { malloc(__at_least_one(u64(initial_cap) * u64(key_bytes))) }
	value_mem := unsafe { malloc(__at_least_one(u64(initial_cap) * u64(value_bytes))) }
	return DenseArray{
		key_bytes: key_bytes
		value_bytes: value_bytes
		cap: initial_cap
		len: 0
		deletes: 0
		all_deleted: 0
		keys: key_mem
		values: value_mem
	}
}
107
// key returns a raw pointer to the i-th key slot.
[inline]
fn (d &DenseArray) key(i int) voidptr {
	offset := i * d.key_bytes
	return unsafe { voidptr(d.keys + offset) }
}
112
// value returns a raw pointer to the i-th value slot.
// for cgen
[inline]
fn (d &DenseArray) value(i int) voidptr {
	offset := i * d.value_bytes
	return unsafe { voidptr(d.values + offset) }
}
118
// has_index reports whether slot `i` holds a live (not deleted) entry.
// When nothing was ever deleted, every slot up to `len` is live.
[inline]
fn (d &DenseArray) has_index(i int) bool {
	if d.deletes == 0 {
		return true
	}
	return unsafe { d.all_deleted[i] } == 0
}
123
// Make space to append an element and return index
// The growth-factor is roughly 1.125 `(x + (x >> 3))`
[inline]
fn (mut d DenseArray) expand() int {
	old_cap := d.cap
	old_key_size := d.key_bytes * old_cap
	old_value_size := d.value_bytes * old_cap
	if d.cap == d.len {
		// array is full: grow keys/values (and the deletion bitmap, if allocated)
		d.cap += d.cap >> 3
		unsafe {
			d.keys = realloc_data(d.keys, old_key_size, d.key_bytes * d.cap)
			d.values = realloc_data(d.values, old_value_size, d.value_bytes * d.cap)
			if d.deletes != 0 {
				d.all_deleted = realloc_data(d.all_deleted, old_cap, d.cap)
				// newly grown bitmap bytes must read as "not deleted"
				vmemset(voidptr(d.all_deleted + d.len), 0, d.cap - d.len)
			}
		}
	}
	push_index := d.len
	unsafe {
		if d.deletes != 0 {
			// mark the appended slot as live in the deletion bitmap
			d.all_deleted[push_index] = 0
		}
	}
	d.len++
	return push_index
}
151
// MapHashFn hashes a key (passed by pointer) to a u64.
type MapHashFn = fn (voidptr) u64

// MapEqFn reports whether the two keys (passed by pointer) are equal.
type MapEqFn = fn (voidptr, voidptr) bool

// MapCloneFn copies the key at the second pointer into the first.
type MapCloneFn = fn (voidptr, voidptr)

// MapFreeFn releases any heap memory owned by the key at the pointer.
type MapFreeFn = fn (voidptr)
159
// map is the internal representation of a V `map` type.
pub struct map {
	// Number of bytes of a key
	key_bytes int
	// Number of bytes of a value
	value_bytes int
mut:
	// Highest even index in the hashtable
	even_index u32
	// Number of cached hashbits left for rehashing
	cached_hashbits u8
	// Used for right-shifting out used hashbits
	shift u8
	// Array storing key-values (ordered)
	key_values DenseArray
	// Pointer to meta-data:
	// - Odd indices store kv_index.
	// - Even indices store probe_count and hashbits.
	metas &u32
	// Extra metas that allows for no ranging when incrementing
	// index in the hashmap
	extra_metas     u32
	has_string_keys bool
	// per-key-type callbacks, chosen when the map is created
	hash_fn   MapHashFn
	key_eq_fn MapEqFn
	clone_fn  MapCloneFn
	free_fn   MapFreeFn
pub mut:
	// Number of key-values currently in the hashmap
	len int
}
191
// map_eq_string compares two string keys given as raw pointers.
fn map_eq_string(a voidptr, b voidptr) bool {
	sa := unsafe { &string(a) }
	sb := unsafe { &string(b) }
	return fast_string_eq(*sa, *sb)
}
195
// map_eq_int_1 compares two 1-byte keys given as raw pointers.
fn map_eq_int_1(a voidptr, b voidptr) bool {
	unsafe {
		return *&u8(a) == *&u8(b)
	}
}
199
// map_eq_int_2 compares two 2-byte keys given as raw pointers.
fn map_eq_int_2(a voidptr, b voidptr) bool {
	unsafe {
		return *&u16(a) == *&u16(b)
	}
}
203
// map_eq_int_4 compares two 4-byte keys given as raw pointers.
fn map_eq_int_4(a voidptr, b voidptr) bool {
	unsafe {
		return *&u32(a) == *&u32(b)
	}
}
207
// map_eq_int_8 compares two 8-byte keys given as raw pointers.
fn map_eq_int_8(a voidptr, b voidptr) bool {
	unsafe {
		return *&u64(a) == *&u64(b)
	}
}
211
// map_clone_string deep-copies the string key at `pkey` into `dest`.
fn map_clone_string(dest voidptr, pkey voidptr) {
	unsafe {
		src := *&string(pkey)
		*&string(dest) = src.clone()
	}
}
218
// map_clone_int_1 copies a 1-byte key from `pkey` into `dest`.
fn map_clone_int_1(dest voidptr, pkey voidptr) {
	unsafe { *&u8(dest) = *&u8(pkey) }
}
224
// map_clone_int_2 copies a 2-byte key from `pkey` into `dest`.
fn map_clone_int_2(dest voidptr, pkey voidptr) {
	unsafe { *&u16(dest) = *&u16(pkey) }
}
230
// map_clone_int_4 copies a 4-byte key from `pkey` into `dest`.
fn map_clone_int_4(dest voidptr, pkey voidptr) {
	unsafe { *&u32(dest) = *&u32(pkey) }
}
236
// map_clone_int_8 copies an 8-byte key from `pkey` into `dest`.
fn map_clone_int_8(dest voidptr, pkey voidptr) {
	unsafe { *&u64(dest) = *&u64(pkey) }
}
242
// map_free_string releases the heap data owned by the string key at `pkey`.
fn map_free_string(pkey voidptr) {
	unsafe {
		(*&string(pkey)).free()
	}
}
248
// map_free_nop does nothing; used for key types that own no heap memory.
fn map_free_nop(_ voidptr) {
}
251
// new_map builds an empty map for keys/values of the given byte sizes,
// wired up with the supplied hash/equality/clone/free callbacks.
fn new_map(key_bytes int, value_bytes int, hash_fn MapHashFn, key_eq_fn MapEqFn, clone_fn MapCloneFn, free_fn MapFreeFn) map {
	// metas gets `extra_metas_inc` trailing slots so probing never
	// needs a bounds check (see trait 5 in the header comment)
	meta_bytes := int(sizeof(u32) * (init_capicity + extra_metas_inc))
	return map{
		key_bytes: key_bytes
		value_bytes: value_bytes
		even_index: init_even_index
		cached_hashbits: max_cached_hashbits
		shift: init_log_capicity
		key_values: new_dense_array(key_bytes, value_bytes)
		metas: unsafe { &u32(vcalloc_noscan(meta_bytes)) }
		extra_metas: extra_metas_inc
		len: 0
		// for now assume anything bigger than a pointer is a string
		has_string_keys: key_bytes > sizeof(voidptr)
		hash_fn: hash_fn
		key_eq_fn: key_eq_fn
		clone_fn: clone_fn
		free_fn: free_fn
	}
}
273
// new_map_init builds a map and fills it with `n` key/value pairs read
// from the flat `keys` and `values` buffers.
fn new_map_init(hash_fn MapHashFn, key_eq_fn MapEqFn, clone_fn MapCloneFn, free_fn MapFreeFn, n int, key_bytes int, value_bytes int, keys voidptr, values voidptr) map {
	mut out := new_map(key_bytes, value_bytes, hash_fn, key_eq_fn, clone_fn, free_fn)
	// TODO pre-allocate n slots
	for i in 0 .. n {
		unsafe {
			out.set(&u8(keys) + i * key_bytes, &u8(values) + i * value_bytes)
		}
	}
	return out
}
288
// move moves the map to a new location in memory.
// It does this by copying to a new location, then setting the
// old location to all `0` with `vmemset`
pub fn (mut m map) move() map {
	// shallow struct copy: the returned map takes over all buffers
	r := *m
	unsafe {
		// zero the old struct so it no longer points at the moved buffers
		vmemset(m, 0, int(sizeof(map)))
	}
	return r
}
299
// clear clears the map without deallocating the allocated data.
// It does it by setting the map length to `0` and resetting the
// probe table to its initial, empty state.
// Example: a.clear() // `a.len` and `a.key_values.len` is now 0
pub fn (mut m map) clear() {
	// Zero the entire currently allocated metas region (sized by the
	// current even_index/extra_metas) BEFORE shrinking even_index, so no
	// stale meta/kv_index pair can ever match a future probe. The old
	// code only set `even_index = 0`, which left stale metas in place and
	// made the next `set` compute a NaN load factor (0/0), so lookups
	// after clear could hit dangling kv_index entries.
	unsafe {
		vmemset(m.metas, 0, int(sizeof(u32) * (m.even_index + 2 + m.extra_metas)))
	}
	m.len = 0
	// reset the probe geometry to match an empty, initial-sized table
	m.even_index = init_even_index
	m.cached_hashbits = max_cached_hashbits
	m.shift = init_log_capicity
	m.key_values.len = 0
}
308
// key_to_index hashes `pkey` and returns the starting bucket index in
// `metas` plus the initial meta word (cached hashbits | probe count 1).
[inline]
fn (m &map) key_to_index(pkey voidptr) (u32, u32) {
	hash := m.hash_fn(pkey)
	// table size is a power of two, so `&` replaces the modulo
	bucket := hash & m.even_index
	// keep `hashbits` bits of the hash and start with one probe
	meta := ((hash >> m.shift) & hash_mask) | probe_inc
	return u32(bucket), u32(meta)
}
316
// meta_less advances (index, meta) past all resident entries that are
// "richer" (have a larger meta) than the probed one, returning the first
// position where the entry could live.
[inline]
fn (m &map) meta_less(_index u32, _metas u32) (u32, u32) {
	mut index := _index
	mut meta := _metas
	for {
		if meta >= unsafe { m.metas[index] } {
			break
		}
		index += 2
		meta += probe_inc
	}
	return index, meta
}
327
// meta_greater inserts the pair (meta, kv_index) starting at `index`,
// Robin-Hood-style: resident entries with a smaller meta are swapped out
// and the displaced entry is carried further down until a free slot.
[inline]
fn (mut m map) meta_greater(_index u32, _metas u32, kvi u32) {
	mut meta := _metas
	mut index := _index
	mut kv_index := kvi
	for unsafe { m.metas[index] } != 0 {
		if meta > unsafe { m.metas[index] } {
			// swap the carried entry with the poorer resident one
			unsafe {
				tmp_meta := m.metas[index]
				m.metas[index] = meta
				meta = tmp_meta
				tmp_index := m.metas[index + 1]
				m.metas[index + 1] = kv_index
				kv_index = tmp_index
			}
		}
		index += 2
		meta += probe_inc
	}
	// free slot found: store the (possibly swapped) entry here
	unsafe {
		m.metas[index] = meta
		m.metas[index + 1] = kv_index
	}
	// top 8 bits of meta hold probe_count + 1
	probe_count := (meta >> hashbits) - 1
	m.ensure_extra_metas(probe_count)
}
354
// ensure_extra_metas grows the `extra_metas` tail of the metas array when
// the highest probe count approaches it, so probing can never run past
// the allocation (see trait 5 in the header comment).
[inline]
fn (mut m map) ensure_extra_metas(probe_count u32) {
	if (probe_count << 1) == m.extra_metas {
		size_of_u32 := sizeof(u32)
		old_mem_size := (m.even_index + 2 + m.extra_metas)
		m.extra_metas += extra_metas_inc
		mem_size := (m.even_index + 2 + m.extra_metas)
		unsafe {
			x := realloc_data(&u8(m.metas), int(size_of_u32 * old_mem_size), int(size_of_u32 * mem_size))
			m.metas = &u32(x)
			// zero the freshly appended tail so probing stops there
			vmemset(m.metas + mem_size - extra_metas_inc, 0, int(sizeof(u32) * extra_metas_inc))
		}
		// Should almost never happen
		if probe_count == 252 {
			panic('Probe overflow')
		}
	}
}
373
// Insert new element to the map. The element is inserted if its key is
// not equivalent to the key of any other element already in the container.
// If the key already exists, its value is changed to the value of the new element.
fn (mut m map) set(key voidptr, value voidptr) {
	// `len << 1` because metas uses two u32 slots per entry
	load_factor := f32(u32(m.len) << 1) / f32(m.even_index)
	if load_factor > max_load_factor {
		m.expand()
	}
	mut index, mut meta := m.key_to_index(key)
	index, meta = m.meta_less(index, meta)
	// While we might have a match
	for meta == unsafe { m.metas[index] } {
		kv_index := int(unsafe { m.metas[index + 1] })
		pkey := unsafe { m.key_values.key(kv_index) }
		if m.key_eq_fn(key, pkey) {
			// key already present: overwrite the value in place
			unsafe {
				pval := m.key_values.value(kv_index)
				vmemcpy(pval, value, m.value_bytes)
			}
			return
		}
		index += 2
		meta += probe_inc
	}
	// new key: append the pair to key_values and link it from metas
	kv_index := m.key_values.expand()
	unsafe {
		pkey := m.key_values.key(kv_index)
		pvalue := m.key_values.value(kv_index)
		m.clone_fn(pkey, key)
		vmemcpy(&u8(pvalue), value, m.value_bytes)
	}
	m.meta_greater(index, meta, u32(kv_index))
	m.len++
}
408
// Doubles the size of the hashmap
fn (mut m map) expand() {
	old_cap := m.even_index
	m.even_index = ((m.even_index + 2) << 1) - 2
	// Check if any hashbits are left
	if m.cached_hashbits != 0 {
		// fast path: reuse hash bits already cached in the metas
		m.cached_rehash(old_cap)
		m.cached_hashbits--
	} else {
		// out of cached bits: shift out the used ones and rehash fully
		m.shift += max_cached_hashbits
		m.cached_hashbits = max_cached_hashbits
		m.rehash()
	}
}
423
// A rehash is the reconstruction of the hash table:
// All the elements in the container are rearranged according
// to their hash value into the newly sized key-value container.
// Rehashes are performed when the load_factor is going to surpass
// the max_load_factor in an operation.
fn (mut m map) rehash() {
	// current metas allocation: one pair per bucket plus the extra tail
	m.reserve(sizeof(u32) * (m.even_index + 2 + m.extra_metas))
}
433
// reserve memory for the map meta data
pub fn (mut m map) reserve(meta_bytes u32) {
	unsafe {
		// TODO: use realloc_data here too
		x := v_realloc(&u8(m.metas), int(meta_bytes))
		m.metas = &u32(x)
		// the whole table is rebuilt below, so start from a zeroed state
		vmemset(m.metas, 0, int(meta_bytes))
	}
	// re-insert every live entry by hashing its key from scratch
	for i := 0; i < m.key_values.len; i++ {
		if !m.key_values.has_index(i) {
			continue
		}
		pkey := unsafe { m.key_values.key(i) }
		mut index, mut meta := m.key_to_index(pkey)
		index, meta = m.meta_less(index, meta)
		m.meta_greater(index, meta, u32(i))
	}
}
452
// This method works like rehash. However, instead of rehashing the
// key completely, it uses the bits cached in `metas`.
fn (mut m map) cached_rehash(old_cap u32) {
	old_metas := m.metas
	metasize := int(sizeof(u32) * (m.even_index + 2 + m.extra_metas))
	m.metas = unsafe { &u32(vcalloc(metasize)) }
	old_extra_metas := m.extra_metas
	for i := u32(0); i <= old_cap + old_extra_metas; i += 2 {
		// skip empty slots (a zero meta means "unused")
		if unsafe { old_metas[i] } == 0 {
			continue
		}
		old_meta := unsafe { old_metas[i] }
		// undo the probing to recover the slot the entry hashed to
		old_probe_count := ((old_meta >> hashbits) - 1) << 1
		old_index := (i - old_probe_count) & (m.even_index >> 1)
		// derive the new (doubled-table) bucket from the cached hash bits
		// instead of re-hashing the key — see trait 6 in the header comment
		mut index := (old_index | (old_meta << m.shift)) & m.even_index
		mut meta := (old_meta & hash_mask) | probe_inc
		index, meta = m.meta_less(index, meta)
		kv_index := unsafe { old_metas[i + 1] }
		m.meta_greater(index, meta, kv_index)
	}
	unsafe { free(old_metas) }
}
475
// This method is used for assignment operators. If the argument-key
// does not exist in the map, it's added to the map along with the zero/default value.
// If the key exists, its respective value is returned.
fn (mut m map) get_and_set(key voidptr, zero voidptr) voidptr {
	for {
		mut index, mut meta := m.key_to_index(key)
		for {
			// a meta match means probe count and cached hashbits agree
			if meta == unsafe { m.metas[index] } {
				kv_index := int(unsafe { m.metas[index + 1] })
				pkey := unsafe { m.key_values.key(kv_index) }
				if m.key_eq_fn(key, pkey) {
					pval := unsafe { m.key_values.value(kv_index) }
					return unsafe { &u8(pval) }
				}
			}
			index += 2
			meta += probe_inc
			if meta > unsafe { m.metas[index] } {
				break
			}
		}
		// Key not found, insert key with zero-value
		// (the outer loop then re-probes and returns a pointer to it)
		m.set(key, zero)
	}
	// unreachable: the loop above only exits via `return`
	return unsafe { nil }
}
502
// If `key` matches the key of an element in the container,
// the method returns a reference to its mapped value.
// If not, a zero/default value is returned.
fn (m &map) get(key voidptr, zero voidptr) voidptr {
	mut index, mut meta := m.key_to_index(key)
	for {
		// a meta match means probe count and cached hashbits agree,
		// so only then is the full key comparison performed
		if meta == unsafe { m.metas[index] } {
			kv_index := int(unsafe { m.metas[index + 1] })
			pkey := unsafe { m.key_values.key(kv_index) }
			if m.key_eq_fn(key, pkey) {
				pval := unsafe { m.key_values.value(kv_index) }
				return unsafe { &u8(pval) }
			}
		}
		index += 2
		meta += probe_inc
		// Robin Hood invariant: once our probe count exceeds the
		// resident entry's, the key cannot be further down
		if meta > unsafe { m.metas[index] } {
			break
		}
	}
	return zero
}
525
// If `key` matches the key of an element in the container,
// the method returns a reference to its mapped value.
// If not, a zero pointer is returned.
// This is used in `x := m['key'] or { ... }`
fn (m &map) get_check(key voidptr) voidptr {
	mut index, mut meta := m.key_to_index(key)
	for {
		// a meta match means probe count and cached hashbits agree
		if meta == unsafe { m.metas[index] } {
			kv_index := int(unsafe { m.metas[index + 1] })
			pkey := unsafe { m.key_values.key(kv_index) }
			if m.key_eq_fn(key, pkey) {
				pval := unsafe { m.key_values.value(kv_index) }
				return unsafe { &u8(pval) }
			}
		}
		index += 2
		meta += probe_inc
		// stop once our probe count exceeds the resident entry's
		if meta > unsafe { m.metas[index] } {
			break
		}
	}
	return 0
}
549
// Checks whether a particular key exists in the map.
fn (m &map) exists(key voidptr) bool {
	mut index, mut meta := m.key_to_index(key)
	// keep probing while our meta could still match a resident entry;
	// once meta exceeds the resident meta, the key cannot be present
	for meta <= unsafe { m.metas[index] } {
		if meta == unsafe { m.metas[index] } {
			kv_index := int(unsafe { m.metas[index + 1] })
			pkey := unsafe { m.key_values.key(kv_index) }
			if m.key_eq_fn(key, pkey) {
				return true
			}
		}
		index += 2
		meta += probe_inc
	}
	return false
}
569
// delete tombstones slot `i`, lazily allocating the deletion bitmap
// on the first ever deletion.
[inline]
fn (mut d DenseArray) delete(i int) {
	if d.deletes == 0 {
		d.all_deleted = vcalloc(d.cap) // zero-initialized: nothing deleted yet
	}
	unsafe {
		d.all_deleted[i] = 1
	}
	d.deletes++
}
580
// delete removes the mapping of a particular key from the map.
[unsafe]
pub fn (mut m map) delete(key voidptr) {
	mut index, mut meta := m.key_to_index(key)
	index, meta = m.meta_less(index, meta)
	// Perform backwards shifting
	for meta == unsafe { m.metas[index] } {
		kv_index := int(unsafe { m.metas[index + 1] })
		pkey := unsafe { m.key_values.key(kv_index) }
		if m.key_eq_fn(key, pkey) {
			// pull every following entry with probe count > 1 one slot
			// back, decrementing its probe count, until the cluster ends
			for (unsafe { m.metas[index + 2] } >> hashbits) > 1 {
				unsafe {
					m.metas[index] = m.metas[index + 2] - probe_inc
					m.metas[index + 1] = m.metas[index + 3]
				}
				index += 2
			}
			m.len--
			m.key_values.delete(kv_index)
			unsafe {
				m.metas[index] = 0
				m.free_fn(pkey)
				// Mark key as deleted
				vmemset(pkey, 0, m.key_bytes)
			}
			// small maps are not worth compacting
			if m.key_values.len <= 32 {
				return
			}
			// Clean up key_values if too many have been deleted
			if m.key_values.deletes >= (m.key_values.len >> 1) {
				m.key_values.zeros_to_end()
				m.rehash()
			}
			return
		}
		index += 2
		meta += probe_inc
	}
}
620
// keys returns all keys in the map.
pub fn (m &map) keys() array {
	mut result := __new_array(m.len, 0, m.key_bytes)
	mut dest := unsafe { &u8(result.data) }
	// has_index() short-circuits to true when nothing was ever deleted,
	// so one loop handles both the dense and the tombstoned case.
	for i := 0; i < m.key_values.len; i++ {
		if !m.key_values.has_index(i) {
			continue
		}
		unsafe {
			m.clone_fn(dest, m.key_values.key(i))
			dest = dest + m.key_bytes
		}
	}
	return result
}
647
// values returns all values in the map.
pub fn (m &map) values() array {
	mut result := __new_array(m.len, 0, m.value_bytes)
	mut dest := unsafe { &u8(result.data) }

	if m.key_values.deletes == 0 {
		// no holes: all values are contiguous, copy them in one go
		unsafe {
			vmemcpy(dest, m.key_values.values, m.value_bytes * m.key_values.len)
		}
		return result
	}

	// tombstoned entries exist: copy only the live slots
	for i := 0; i < m.key_values.len; i++ {
		if !m.key_values.has_index(i) {
			continue
		}
		unsafe {
			vmemcpy(dest, m.key_values.value(i), m.value_bytes)
			dest = dest + m.value_bytes
		}
	}
	return result
}
672
// warning: only copies keys, does not clone
[unsafe]
fn (d &DenseArray) clone() DenseArray {
	res := DenseArray{
		key_bytes: d.key_bytes
		value_bytes: d.value_bytes
		cap: d.cap
		len: d.len
		deletes: d.deletes
		all_deleted: 0
		values: 0
		keys: 0
	}
	unsafe {
		// the deletion bitmap only exists after the first delete
		if d.deletes != 0 {
			res.all_deleted = memdup(d.all_deleted, d.cap)
		}
		// raw byte copies; string keys get deep-cloned later by map.clone()
		res.keys = memdup(d.keys, d.cap * d.key_bytes)
		res.values = memdup(d.values, d.cap * d.value_bytes)
	}
	return res
}
695
// clone returns a clone of the `map`.
[unsafe]
pub fn (m &map) clone() map {
	metasize := int(sizeof(u32) * (m.even_index + 2 + m.extra_metas))
	res := map{
		key_bytes: m.key_bytes
		value_bytes: m.value_bytes
		even_index: m.even_index
		cached_hashbits: m.cached_hashbits
		shift: m.shift
		key_values: unsafe { m.key_values.clone() }
		metas: unsafe { &u32(malloc_noscan(metasize)) }
		extra_metas: m.extra_metas
		len: m.len
		has_string_keys: m.has_string_keys
		hash_fn: m.hash_fn
		key_eq_fn: m.key_eq_fn
		clone_fn: m.clone_fn
		free_fn: m.free_fn
	}
	// copy the whole probe table verbatim (same layout, same hashes)
	unsafe { vmemcpy(res.metas, m.metas, metasize) }
	if !m.has_string_keys {
		return res
	}
	// clone keys: key_values.clone() only byte-copied them, which is not
	// enough for heap-owning string keys
	for i in 0 .. m.key_values.len {
		if !m.key_values.has_index(i) {
			continue
		}
		m.clone_fn(res.key_values.key(i), m.key_values.key(i))
	}
	return res
}
729
// free releases all memory resources occupied by the `map`.
[unsafe]
pub fn (m &map) free() {
	unsafe { free(m.metas) }
	unsafe {
		m.metas = nil
	}
	if m.key_values.deletes == 0 {
		// no holes: every slot up to len holds a live key to free
		for i := 0; i < m.key_values.len; i++ {
			unsafe {
				pkey := m.key_values.key(i)
				m.free_fn(pkey)
				vmemset(pkey, 0, m.key_bytes)
			}
		}
	} else {
		// skip slots whose keys were already freed by delete()
		for i := 0; i < m.key_values.len; i++ {
			if !m.key_values.has_index(i) {
				continue
			}
			unsafe {
				pkey := m.key_values.key(i)
				m.free_fn(pkey)
				vmemset(pkey, 0, m.key_bytes)
			}
		}
	}
	// release the three DenseArray buffers and nil the pointers so a
	// double free() on the same map is harmless
	unsafe {
		if m.key_values.all_deleted != nil {
			free(m.key_values.all_deleted)
			m.key_values.all_deleted = nil
		}
		if m.key_values.keys != nil {
			free(m.key_values.keys)
			m.key_values.keys = nil
		}
		if m.key_values.values != nil {
			free(m.key_values.values)
			m.key_values.values = nil
		}
		// TODO: the next lines assume that callback functions are static and independent from each particular
		// map instance. Closures may invalidate that assumption, so revisit when RC for closures works.
		m.hash_fn = nil
		m.key_eq_fn = nil
		m.clone_fn = nil
		m.free_fn = nil
	}
}