
Commit 9980b70

Committed Sep 13, 2016
runtime: limit the number of map overflow buckets
Consider repeatedly adding many items to a map and then deleting them all, as in #16070. The map itself doesn't need to grow above the high water mark of number of items. However, due to random collisions, the map can accumulate overflow buckets.

Prior to this CL, those overflow buckets were never removed, which led to a slow memory leak.

The problem with removing overflow buckets is iterators. The obvious approach is to repack keys and values and eliminate unused overflow buckets. However, keys, values, and overflow buckets cannot be manipulated without disrupting iterators.

This CL takes a different approach, which is to reuse the existing map growth mechanism, which is well established, well tested, and safe in the presence of iterators. When a map has accumulated enough overflow buckets we trigger map growth, but grow into a map of the same size as before. The old overflow buckets will be left behind for garbage collection.

For the code in #16070, instead of climbing (very slowly) forever, memory usage now cycles between 264 MB and 483 MB every 15 minutes or so.

To avoid increasing the size of maps, the overflow bucket counter is only 16 bits. For large maps, the counter is incremented stochastically.

Fixes #16070

Change-Id: If551d77613ec6836907efca58bda3deee304297e
Reviewed-on: https://go-review.googlesource.com/25049
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
1 parent 0cd3ecb, commit 9980b70
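The failure mode is easy to reproduce. Below is a minimal sketch of the kind of insert/delete churn described in #16070 (the sizes and reporting here are illustrative, not the issue's exact program). Before this change, HeapAlloc drifts upward across cycles as overflow buckets accumulate; with it, usage plateaus and cycles between bounds.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	m := make(map[int]int)
	var ms runtime.MemStats
	for cycle := 0; ; cycle++ {
		// Fill to a fixed high-water mark, then delete everything.
		// The live size never exceeds one cycle's worth of items,
		// but random collisions keep allocating overflow buckets.
		for i := 0; i < 1<<20; i++ {
			m[i] = i
		}
		for i := 0; i < 1<<20; i++ {
			delete(m, i)
		}
		runtime.ReadMemStats(&ms)
		fmt.Printf("cycle %d: HeapAlloc = %d MB\n", cycle, ms.HeapAlloc>>20)
	}
}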

File tree

3 files changed: +212 -78 lines
 

src/cmd/compile/internal/gc/reflect.go (+12 -10)
@@ -182,20 +182,22 @@ func hmap(t *Type) *Type {
 	}
 
 	bucket := mapbucket(t)
-	var field [8]*Field
-	field[0] = makefield("count", Types[TINT])
-	field[1] = makefield("flags", Types[TUINT8])
-	field[2] = makefield("B", Types[TUINT8])
-	field[3] = makefield("hash0", Types[TUINT32])
-	field[4] = makefield("buckets", Ptrto(bucket))
-	field[5] = makefield("oldbuckets", Ptrto(bucket))
-	field[6] = makefield("nevacuate", Types[TUINTPTR])
-	field[7] = makefield("overflow", Types[TUNSAFEPTR])
+	fields := []*Field{
+		makefield("count", Types[TINT]),
+		makefield("flags", Types[TUINT8]),
+		makefield("B", Types[TUINT8]),
+		makefield("noverflow", Types[TUINT16]),
+		makefield("hash0", Types[TUINT32]),
+		makefield("buckets", Ptrto(bucket)),
+		makefield("oldbuckets", Ptrto(bucket)),
+		makefield("nevacuate", Types[TUINTPTR]),
+		makefield("overflow", Types[TUNSAFEPTR]),
+	}
 
 	h := typ(TSTRUCT)
 	h.Noalg = true
 	h.Local = t.Local
-	h.SetFields(field[:])
+	h.SetFields(fields)
 	dowidth(h)
 	t.MapType().Hmap = h
 	h.StructType().Map = t
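One detail worth noting about the new noverflow field: on 64-bit platforms the uint16 fits into the two bytes of padding that previously sat between B and hash0, so the map header should not grow at all. A quick way to check the layout claim, using hypothetical mirror structs (for size comparison only; these are not the runtime's real types):

package main

import (
	"fmt"
	"unsafe"
)

// hmapOld mirrors the header layout before this commit.
type hmapOld struct {
	count      int
	flags      uint8
	B          uint8
	hash0      uint32
	buckets    unsafe.Pointer
	oldbuckets unsafe.Pointer
	nevacuate  uintptr
	overflow   unsafe.Pointer
}

// hmapNew adds noverflow, which lands in what was padding above.
type hmapNew struct {
	count      int
	flags      uint8
	B          uint8
	noverflow  uint16
	hash0      uint32
	buckets    unsafe.Pointer
	oldbuckets unsafe.Pointer
	nevacuate  uintptr
	overflow   unsafe.Pointer
}

func main() {
	// On common 64-bit platforms both print 48.
	fmt.Println(unsafe.Sizeof(hmapOld{}), unsafe.Sizeof(hmapNew{}))
}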

src/runtime/hashmap.go (+170 -62)
@@ -93,9 +93,10 @@ const (
 	minTopHash = 4 // minimum tophash for a normal filled cell.
 
 	// flags
-	iterator    = 1 // there may be an iterator using buckets
-	oldIterator = 2 // there may be an iterator using oldbuckets
-	hashWriting = 4 // a goroutine is writing to the map
+	iterator     = 1 // there may be an iterator using buckets
+	oldIterator  = 2 // there may be an iterator using oldbuckets
+	hashWriting  = 4 // a goroutine is writing to the map
+	sameSizeGrow = 8 // the current map growth is to a new map of the same size
 
 	// sentinel bucket ID for iterator checks
 	noCheck = 1<<(8*sys.PtrSize) - 1
@@ -105,10 +106,11 @@ const (
 type hmap struct {
 	// Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
 	// ../reflect/type.go. Don't change this structure without also changing that code!
-	count int // # live cells == size of map. Must be first (used by len() builtin)
-	flags uint8
-	B     uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
-	hash0 uint32 // hash seed
+	count     int // # live cells == size of map. Must be first (used by len() builtin)
+	flags     uint8
+	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
+	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
+	hash0     uint32 // hash seed
 
 	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
 	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
@@ -165,7 +167,34 @@ func (b *bmap) overflow(t *maptype) *bmap {
 	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
 }
 
+// incrnoverflow increments h.noverflow.
+// noverflow counts the number of overflow buckets.
+// This is used to trigger same-size map growth.
+// See also tooManyOverflowBuckets.
+// To keep hmap small, noverflow is a uint16.
+// When there are few buckets, noverflow is an exact count.
+// When there are many buckets, noverflow is an approximate count.
+func (h *hmap) incrnoverflow() {
+	// We trigger same-size map growth if there are
+	// as many overflow buckets as buckets.
+	// We need to be able to count to 1<<h.B.
+	if h.B < 16 {
+		h.noverflow++
+		return
+	}
+	// Increment with probability 1/(1<<(h.B-15)).
+	// When we reach 1<<15 - 1, we will have approximately
+	// as many overflow buckets as buckets.
+	mask := uint32(1)<<(h.B-15) - 1
+	// Example: if h.B == 18, then mask == 7,
+	// and fastrand & 7 == 0 with probability 1/8.
+	if fastrand()&mask == 0 {
+		h.noverflow++
+	}
+}
+
 func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
+	h.incrnoverflow()
 	if t.bucket.kind&kindNoPointers != 0 {
 		h.createOverflow()
 		*h.overflow[0] = append(*h.overflow[0], ovf)
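The stochastic increment is a standard approximate-counting trick: increment with probability 1/2^k, and the counter estimates n/2^k. A standalone sketch of the idea, substituting math/rand for the runtime's fastrand (names here are illustrative):

package main

import (
	"fmt"
	"math/rand"
)

// approxCounter increments with probability 1/(1<<shift), so count
// estimates events>>shift, just as noverflow approximates the true
// overflow-bucket count once h.B >= 16.
type approxCounter struct {
	count uint16
	shift uint
}

func (c *approxCounter) incr() {
	mask := uint32(1)<<c.shift - 1
	if rand.Uint32()&mask == 0 {
		c.count++
	}
}

func main() {
	c := approxCounter{shift: 3} // like h.B == 18: probability 1/8
	const events = 1 << 16
	for i := 0; i < events; i++ {
		c.incr()
	}
	fmt.Printf("events=%d estimate=%d expected~%d\n", events, c.count, events>>c.shift)
}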
@@ -238,7 +267,7 @@ func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
 
 	// find size parameter which will hold the requested # of elements
 	B := uint8(0)
-	for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
+	for ; overLoadFactor(hint, B); B++ {
 	}
 
 	// allocate initial hash table
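overLoadFactor simply gives the old loop condition a name. With loadFactor = 6.5 and bucketCnt = 8, as defined in this file, a hint of 1000 settles on B = 8: 1000/256 is about 3.9 entries per bucket, under the load factor, while 1000/128 is about 7.8, over it. A standalone copy of the sizing loop (mirroring the predicate from this diff, not the runtime's actual code):

package main

import "fmt"

const (
	bucketCnt  = 8   // entries per bucket, as in hashmap.go
	loadFactor = 6.5 // max average load, as in hashmap.go
)

// overLoadFactor mirrors the predicate from the diff: true if count
// items in 1<<B buckets would exceed the load factor.
func overLoadFactor(count int64, B uint8) bool {
	return count >= bucketCnt && float32(count) >= loadFactor*float32(uint64(1)<<B)
}

func main() {
	for _, hint := range []int64{0, 100, 1000, 1 << 20} {
		B := uint8(0)
		for ; overLoadFactor(hint, B); B++ {
		}
		fmt.Printf("hint=%8d -> B=%2d (%d buckets)\n", hint, B, 1<<B)
	}
}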
@@ -260,6 +289,7 @@ func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
 	h.buckets = buckets
 	h.oldbuckets = nil
 	h.nevacuate = 0
+	h.noverflow = 0
 
 	return h
 }
@@ -290,7 +320,11 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 	m := uintptr(1)<<h.B - 1
 	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
 	if c := h.oldbuckets; c != nil {
-		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+		if !h.sameSizeGrow() {
+			// There used to be half as many buckets; mask down one more power of two.
+			m >>= 1
+		}
+		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
 		if !evacuated(oldb) {
 			b = oldb
 		}
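This mask adjustment is what keeps lookups correct during both kinds of growth, and the same pattern repeats in mapaccess2, mapaccessK, and throughout hashmap_fast.go below: during a doubling grow the old array had half as many buckets, so the mask drops one bit; during a same-size grow it is unchanged. A small illustration of the index arithmetic (the hash value is arbitrary):

package main

import "fmt"

// oldBucketIndex shows where a lookup probes the old bucket array,
// given the new-array mask m = 1<<B - 1.
func oldBucketIndex(hash, m uintptr, sameSize bool) uintptr {
	if !sameSize {
		// There used to be half as many buckets; mask down one more power of two.
		m >>= 1
	}
	return hash & m
}

func main() {
	const B = 4            // new array: 16 buckets
	m := uintptr(1)<<B - 1 // binary 1111
	hash := uintptr(13)    // binary 1101
	fmt.Println(oldBucketIndex(hash, m, false)) // doubling grow: 13 & 7 = 5
	fmt.Println(oldBucketIndex(hash, m, true))  // same-size grow: 13 & 15 = 13
}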
@@ -344,7 +378,11 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
 	m := uintptr(1)<<h.B - 1
 	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
 	if c := h.oldbuckets; c != nil {
-		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
+		if !h.sameSizeGrow() {
+			// There used to be half as many buckets; mask down one more power of two.
+			m >>= 1
+		}
+		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
 		if !evacuated(oldb) {
 			b = oldb
 		}
@@ -387,7 +425,11 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
 	m := uintptr(1)<<h.B - 1
 	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
 	if c := h.oldbuckets; c != nil {
-		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
+		if !h.sameSizeGrow() {
+			// There used to be half as many buckets; mask down one more power of two.
+			m >>= 1
+		}
+		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
 		if !evacuated(oldb) {
 			b = oldb
 		}
@@ -465,7 +507,7 @@ func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
 
 again:
 	bucket := hash & (uintptr(1)<<h.B - 1)
-	if h.oldbuckets != nil {
+	if h.growing() {
 		growWork(t, h, bucket)
 	}
 	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
@@ -514,8 +556,11 @@ again:
 		b = ovf
 	}
 
-	// did not find mapping for key. Allocate new cell & add entry.
-	if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
+	// Did not find mapping for key. Allocate new cell & add entry.
+
+	// If we hit the max load factor or we have too many overflow buckets,
+	// and we're not already in the middle of growing, start growing.
+	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
 		hashGrow(t, h)
 		goto again // Growing the table invalidates everything, so try again
 	}
@@ -573,7 +618,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
 	alg := t.key.alg
 	hash := alg.hash(key, uintptr(h.hash0))
 	bucket := hash & (uintptr(1)<<h.B - 1)
-	if h.oldbuckets != nil {
+	if h.growing() {
 		growWork(t, h, bucket)
 	}
 	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
@@ -700,12 +745,12 @@ next:
 		it.value = nil
 		return
 	}
-	if h.oldbuckets != nil && it.B == h.B {
+	if h.growing() && it.B == h.B {
 		// Iterator was started in the middle of a grow, and the grow isn't done yet.
 		// If the bucket we're looking at hasn't been filled in yet (i.e. the old
 		// bucket hasn't been evacuated) then we need to iterate through the old
 		// bucket and only return the ones that will be migrated to this bucket.
-		oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
+		oldbucket := bucket & it.h.oldbucketmask()
 		b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
 		if !evacuated(b) {
 			checkBucket = bucket
@@ -729,9 +774,9 @@ next:
 		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
 		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
 		if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
-			if checkBucket != noCheck {
-				// Special case: iterator was started during a grow and the
-				// grow is not done yet. We're working on a bucket whose
+			if checkBucket != noCheck && !h.sameSizeGrow() {
+				// Special case: iterator was started during a grow to a larger size
+				// and the grow is not done yet. We're working on a bucket whose
 				// oldbucket has not been evacuated yet. Or at least, it wasn't
 				// evacuated when we started the bucket. So we're iterating
 				// through the oldbucket, skipping any keys that will go
@@ -817,21 +862,27 @@ next:
 }
 
 func hashGrow(t *maptype, h *hmap) {
-	if h.oldbuckets != nil {
-		throw("evacuation not done in time")
+	// If we've hit the load factor, get bigger.
+	// Otherwise, there are too many overflow buckets,
+	// so keep the same number of buckets and "grow" laterally.
+	bigger := uint8(1)
+	if !overLoadFactor(int64(h.count), h.B) {
+		bigger = 0
+		h.flags |= sameSizeGrow
 	}
 	oldbuckets := h.buckets
-	newbuckets := newarray(t.bucket, 1<<(h.B+1))
+	newbuckets := newarray(t.bucket, 1<<(h.B+bigger))
 	flags := h.flags &^ (iterator | oldIterator)
 	if h.flags&iterator != 0 {
 		flags |= oldIterator
 	}
 	// commit the grow (atomic wrt gc)
-	h.B++
+	h.B += bigger
 	h.flags = flags
 	h.oldbuckets = oldbuckets
 	h.buckets = newbuckets
 	h.nevacuate = 0
+	h.noverflow = 0
 
 	if h.overflow != nil {
 		// Promote current overflow buckets to the old generation.
@@ -846,35 +897,87 @@ func hashGrow(t *maptype, h *hmap) {
 	// by growWork() and evacuate().
 }
 
-func growWork(t *maptype, h *hmap, bucket uintptr) {
-	noldbuckets := uintptr(1) << (h.B - 1)
+// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
+func overLoadFactor(count int64, B uint8) bool {
+	// TODO: rewrite to use integer math and comparison?
+	return count >= bucketCnt && float32(count) >= loadFactor*float32((uintptr(1)<<B))
+}
+
+// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
+// Note that most of these overflow buckets must be in sparse use;
+// if use was dense, then we'd have already triggered regular map growth.
+func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
+	// If the threshold is too low, we do extraneous work.
+	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
+	// "too many" means (approximately) as many overflow buckets as regular buckets.
+	// See incrnoverflow for more details.
+	if B < 16 {
+		return noverflow >= uint16(1)<<B
+	}
+	return noverflow >= 1<<15
+}
 
+// growing reports whether h is growing. The growth may be to the same size or bigger.
+func (h *hmap) growing() bool {
+	return h.oldbuckets != nil
+}
+
+// sameSizeGrow reports whether the current growth is to a map of the same size.
+func (h *hmap) sameSizeGrow() bool {
+	return h.flags&sameSizeGrow != 0
+}
+
+// noldbuckets calculates the number of buckets prior to the current map growth.
+func (h *hmap) noldbuckets() uintptr {
+	oldB := h.B
+	if !h.sameSizeGrow() {
+		oldB--
+	}
+	return uintptr(1) << oldB
+}
+
+// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
+func (h *hmap) oldbucketmask() uintptr {
+	return h.noldbuckets() - 1
+}
+
+func growWork(t *maptype, h *hmap, bucket uintptr) {
 	// make sure we evacuate the oldbucket corresponding
 	// to the bucket we're about to use
-	evacuate(t, h, bucket&(noldbuckets-1))
+	evacuate(t, h, bucket&h.oldbucketmask())
 
 	// evacuate one more oldbucket to make progress on growing
-	if h.oldbuckets != nil {
+	if h.growing() {
 		evacuate(t, h, h.nevacuate)
 	}
 }
 
 func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
-	newbit := uintptr(1) << (h.B - 1)
+	newbit := h.noldbuckets()
 	alg := t.key.alg
 	if !evacuated(b) {
 		// TODO: reuse overflow buckets instead of using new ones, if there
 		// is no iterator using the old buckets. (If !oldIterator.)
 
-		x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
-		y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
-		xi := 0
-		yi := 0
-		xk := add(unsafe.Pointer(x), dataOffset)
-		yk := add(unsafe.Pointer(y), dataOffset)
-		xv := add(xk, bucketCnt*uintptr(t.keysize))
-		yv := add(yk, bucketCnt*uintptr(t.keysize))
+		var (
+			x, y   *bmap          // current low/high buckets in new map
+			xi, yi int            // key/val indices into x and y
+			xk, yk unsafe.Pointer // pointers to current x and y key storage
+			xv, yv unsafe.Pointer // pointers to current x and y value storage
+		)
+		x = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
+		xi = 0
+		xk = add(unsafe.Pointer(x), dataOffset)
+		xv = add(xk, bucketCnt*uintptr(t.keysize))
+		if !h.sameSizeGrow() {
+			// Only calculate y pointers if we're growing bigger.
+			// Otherwise GC can see bad pointers.
+			y = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
+			yi = 0
+			yk = add(unsafe.Pointer(y), dataOffset)
+			yv = add(yk, bucketCnt*uintptr(t.keysize))
+		}
 		for ; b != nil; b = b.overflow(t) {
 			k := add(unsafe.Pointer(b), dataOffset)
 			v := add(k, bucketCnt*uintptr(t.keysize))
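Taken together, the helpers make the trigger concrete: for B < 16 a same-size grow starts once there are as many overflow buckets as regular buckets, and for larger maps once the approximate counter saturates near 1<<15. A quick table of trigger thresholds, computed with a copy of tooManyOverflowBuckets from the hunk above:

package main

import "fmt"

// tooManyOverflowBuckets is copied from the diff above.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	if B < 16 {
		return noverflow >= uint16(1)<<B
	}
	return noverflow >= 1<<15
}

func main() {
	for _, B := range []uint8{4, 8, 15, 16, 20} {
		// Find the smallest noverflow that trips the predicate.
		n := uint16(0)
		for !tooManyOverflowBuckets(n, B) {
			n++
		}
		fmt.Printf("B=%2d (%7d buckets): same-size grow at noverflow >= %d\n", B, 1<<B, n)
	}
}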
@@ -891,34 +994,38 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 				if t.indirectkey {
 					k2 = *((*unsafe.Pointer)(k2))
 				}
-				// Compute hash to make our evacuation decision (whether we need
-				// to send this key/value to bucket x or bucket y).
-				hash := alg.hash(k2, uintptr(h.hash0))
-				if h.flags&iterator != 0 {
-					if !t.reflexivekey && !alg.equal(k2, k2) {
-						// If key != key (NaNs), then the hash could be (and probably
-						// will be) entirely different from the old hash. Moreover,
-						// it isn't reproducible. Reproducibility is required in the
-						// presence of iterators, as our evacuation decision must
-						// match whatever decision the iterator made.
-						// Fortunately, we have the freedom to send these keys either
-						// way. Also, tophash is meaningless for these kinds of keys.
-						// We let the low bit of tophash drive the evacuation decision.
-						// We recompute a new random tophash for the next level so
-						// these keys will get evenly distributed across all buckets
-						// after multiple grows.
-						if (top & 1) != 0 {
-							hash |= newbit
-						} else {
-							hash &^= newbit
-						}
-						top = uint8(hash >> (sys.PtrSize*8 - 8))
-						if top < minTopHash {
-							top += minTopHash
+				useX := true
+				if !h.sameSizeGrow() {
+					// Compute hash to make our evacuation decision (whether we need
+					// to send this key/value to bucket x or bucket y).
+					hash := alg.hash(k2, uintptr(h.hash0))
+					if h.flags&iterator != 0 {
+						if !t.reflexivekey && !alg.equal(k2, k2) {
+							// If key != key (NaNs), then the hash could be (and probably
+							// will be) entirely different from the old hash. Moreover,
+							// it isn't reproducible. Reproducibility is required in the
+							// presence of iterators, as our evacuation decision must
+							// match whatever decision the iterator made.
+							// Fortunately, we have the freedom to send these keys either
+							// way. Also, tophash is meaningless for these kinds of keys.
+							// We let the low bit of tophash drive the evacuation decision.
+							// We recompute a new random tophash for the next level so
+							// these keys will get evenly distributed across all buckets
+							// after multiple grows.
+							if top&1 != 0 {
+								hash |= newbit
+							} else {
+								hash &^= newbit
+							}
+							top = uint8(hash >> (sys.PtrSize*8 - 8))
+							if top < minTopHash {
+								top += minTopHash
+							}
 						}
 					}
+					useX = hash&newbit == 0
 				}
-				if (hash & newbit) == 0 {
+				if useX {
 					b.tophash[i] = evacuatedX
 					if xi == bucketCnt {
 						newx := (*bmap)(newobject(t.bucket))
@@ -988,6 +1095,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 			if h.overflow != nil {
 				h.overflow[1] = nil
 			}
+			h.flags &^= sameSizeGrow
 		}
 	}
 }
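A note on the NaN special case preserved inside evacuate: NaN keys compare unequal to themselves, so their hash is not reproducible and the X/Y decision cannot be recomputed from the hash; the stored tophash bit drives it instead. The underlying irreproducibility is visible from ordinary user code, where each insertion under a NaN key creates a distinct entry that can never be looked up or deleted:

package main

import (
	"fmt"
	"math"
)

func main() {
	m := make(map[float64]int)
	nan := math.NaN()
	m[nan] = 1
	m[nan] = 2 // NaN != NaN, so this is a second, distinct entry
	fmt.Println(len(m)) // 2
	_, ok := m[nan]     // lookup never finds a NaN key
	fmt.Println(ok)     // false
	delete(m, nan)      // delete never removes one
	fmt.Println(len(m)) // still 2
}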

src/runtime/hashmap_fast.go (+30 -6)
@@ -29,7 +29,11 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
 		m := uintptr(1)<<h.B - 1
 		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
 		if c := h.oldbuckets; c != nil {
-			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+			if !h.sameSizeGrow() {
+				// There used to be half as many buckets; mask down one more power of two.
+				m >>= 1
+			}
+			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
 			if !evacuated(oldb) {
 				b = oldb
 			}
@@ -74,7 +78,11 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
 		m := uintptr(1)<<h.B - 1
 		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
 		if c := h.oldbuckets; c != nil {
-			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+			if !h.sameSizeGrow() {
+				// There used to be half as many buckets; mask down one more power of two.
+				m >>= 1
+			}
+			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
 			if !evacuated(oldb) {
 				b = oldb
 			}
@@ -119,7 +127,11 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
 		m := uintptr(1)<<h.B - 1
 		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
 		if c := h.oldbuckets; c != nil {
-			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+			if !h.sameSizeGrow() {
+				// There used to be half as many buckets; mask down one more power of two.
+				m >>= 1
+			}
+			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
 			if !evacuated(oldb) {
 				b = oldb
 			}
@@ -164,7 +176,11 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
 		m := uintptr(1)<<h.B - 1
 		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
 		if c := h.oldbuckets; c != nil {
-			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+			if !h.sameSizeGrow() {
+				// There used to be half as many buckets; mask down one more power of two.
+				m >>= 1
+			}
+			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
 			if !evacuated(oldb) {
 				b = oldb
 			}
@@ -264,7 +280,11 @@ dohash:
 	m := uintptr(1)<<h.B - 1
 	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
 	if c := h.oldbuckets; c != nil {
-		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+		if !h.sameSizeGrow() {
+			// There used to be half as many buckets; mask down one more power of two.
+			m >>= 1
+		}
+		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
 		if !evacuated(oldb) {
 			b = oldb
 		}
@@ -367,7 +387,11 @@ dohash:
 	m := uintptr(1)<<h.B - 1
 	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
 	if c := h.oldbuckets; c != nil {
-		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+		if !h.sameSizeGrow() {
+			// There used to be half as many buckets; mask down one more power of two.
+			m >>= 1
+		}
+		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
 		if !evacuated(oldb) {
 			b = oldb
 		}
