1/*
2 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 */
23
24import org.testng.annotations.DataProvider;
25import org.testng.annotations.Test;
26
27import java.util.Collection;
28import java.util.HashMap;
29import java.util.LinkedHashMap;
30import java.util.Map;
31import java.util.concurrent.ConcurrentHashMap;
32import java.util.function.BiConsumer;
33import java.util.stream.Collector;
34import java.util.stream.Collectors;
35import java.util.stream.IntStream;
36
37import static org.testng.Assert.assertEquals;
38
/*
 * @test
 * @bug 8023463
 * @summary Test the case where a bin is treeified and vice versa
 * @run testng MapBinToFromTreeTest
 */
45
46@Test
47public class MapBinToFromTreeTest {
48
49    // Initial capacity of map
50    // Should be >= the map capacity for treeifiying, see HashMap/ConcurrentMap.MIN_TREEIFY_CAPACITY
51    static final int INITIAL_CAPACITY = 64;
52
53    // Maximum size of map
54    // Should be > the treeify threshold, see HashMap/ConcurrentMap.TREEIFY_THRESHOLD
55    // Should be > INITIAL_CAPACITY to ensure resize occurs
56    static final int SIZE = 256;
57
58    // Load factor of map
59    // A value 1.0 will ensure that a new threshold == capacity
60    static final float LOAD_FACTOR = 1.0f;
61
62    @DataProvider(name = "maps")
63    static Object[][] mapProvider() {
64        return new Object[][] {
65                // Pass in the class name as a description for test reporting
66                // purposes
67                { HashMap.class.getName(), new HashMap(INITIAL_CAPACITY, LOAD_FACTOR) },
68                { LinkedHashMap.class.getName(), new LinkedHashMap(INITIAL_CAPACITY, LOAD_FACTOR) },
69                { ConcurrentHashMap.class.getName(), new ConcurrentHashMap(INITIAL_CAPACITY, LOAD_FACTOR) },
70        };
71    }
72
73    @Test(dataProvider = "maps")
74    public void testPutThenGet(String d, Map<HashCodeInteger, Integer> m) {
75        put(SIZE, m, (i, s) -> {
76            for (int j = 0; j < s; j++) {
77                assertEquals(m.get(new HashCodeInteger(j)).intValue(), j,
78                             String.format("Map.get(%d)", j));
79            }
80        });
81    }
82
83    @Test(dataProvider = "maps")
84    public void testPutThenTraverse(String d, Map<HashCodeInteger, Integer> m) {
85        Collector<Integer, ?, ? extends Collection<Integer>> c = getCollector(m);
86
87        put(SIZE, m, (i, s) -> {
88            // Note that it is OK to collect to a Set (HashSet) as long as
89            // integer values are used since these tests only check for
90            // collisions and other tests will verify more general functionality
91            Collection<Integer> actual = m.keySet().stream().map(e -> e.value).collect(c);
92            Collection<Integer> expected = IntStream.range(0, s).boxed().collect(c);
93            assertEquals(actual, expected, "Map.keySet()");
94        });
95    }
96
97    @Test(dataProvider = "maps")
98    public void testRemoveThenGet(String d, Map<HashCodeInteger, Integer> m) {
99        put(SIZE, m, (i, s) -> { });
100
101        remove(m, (i, s) -> {
102            for (int j = i + 1; j < SIZE; j++) {
103                assertEquals(m.get(new HashCodeInteger(j)).intValue(), j,
104                             String.format("Map.get(%d)", j));
105            }
106        });
107    }
108
109    @Test(dataProvider = "maps")
110    public void testRemoveThenTraverse(String d, Map<HashCodeInteger, Integer> m) {
111        put(SIZE, m, (i, s) -> { });
112
113        Collector<Integer, ?, ? extends Collection<Integer>> c = getCollector(m);
114
115        remove(m, (i, s) -> {
116            Collection<Integer> actual = m.keySet().stream().map(e -> e.value).collect(c);
117            Collection<Integer> expected = IntStream.range(i + 1, SIZE).boxed().collect(c);
118            assertEquals(actual, expected, "Map.keySet()");
119        });
120    }
121
122    @Test(dataProvider = "maps")
123    public void testUntreeifyOnResizeWithGet(String d, Map<HashCodeInteger, Integer> m) {
124        // Fill the map with 64 entries grouped into 4 buckets
125        put(INITIAL_CAPACITY, m, (i, s) -> { });
126
127        for (int i = INITIAL_CAPACITY; i < SIZE; i++) {
128            // Add further entries in the 0'th bucket so as not to disturb
129            // other buckets, entries of which may be distributed and/or
130            // the bucket untreeified on resize
131            m.put(new HashCodeInteger(i, 0), i);
132
133            for (int j = 0; j < INITIAL_CAPACITY; j++) {
134                assertEquals(m.get(new HashCodeInteger(j)).intValue(), j,
135                             String.format("Map.get(%d) < INITIAL_CAPACITY", j));
136            }
137            for (int j = INITIAL_CAPACITY; j <= i; j++) {
138                assertEquals(m.get(new HashCodeInteger(j, 0)).intValue(), j,
139                             String.format("Map.get(%d) >= INITIAL_CAPACITY", j));
140            }
141        }
142    }
143
144    @Test(dataProvider = "maps")
145    public void testUntreeifyOnResizeWithTraverse(String d, Map<HashCodeInteger, Integer> m) {
146        // Fill the map with 64 entries grouped into 4 buckets
147        put(INITIAL_CAPACITY, m, (i, s) -> { });
148
149        Collector<Integer, ?, ? extends Collection<Integer>> c = getCollector(m);
150
151        for (int i = INITIAL_CAPACITY; i < SIZE; i++) {
152            // Add further entries in the 0'th bucket so as not to disturb
153            // other buckets, entries of which may be distributed and/or
154            // the bucket untreeified on resize
155            m.put(new HashCodeInteger(i, 0), i);
156
157            Collection<Integer> actual = m.keySet().stream().map(e -> e.value).collect(c);
158            Collection<Integer> expected = IntStream.rangeClosed(0, i).boxed().collect(c);
159            assertEquals(actual, expected, "Key set");
160        }
161    }
162
163    Collector<Integer, ?, ? extends Collection<Integer>> getCollector(Map<?, ?> m) {
164        Collector<Integer, ?, ? extends Collection<Integer>> collector = m instanceof LinkedHashMap
165                                                                         ? Collectors.toList()
166                                                                         : Collectors.toSet();
167        return collector;
168    }
169
170    void put(int size, Map<HashCodeInteger, Integer> m, BiConsumer<Integer, Integer> c) {
171        for (int i = 0; i < size; i++) {
172            m.put(new HashCodeInteger(i), i);
173
174            c.accept(i, m.size());
175        }
176    }
177
178    void remove(Map<HashCodeInteger, Integer> m, BiConsumer<Integer, Integer> c) {
179        int size = m.size();
180        // Remove all elements thus ensuring at some point trees will be
181        // converting back to bins
182        for (int i = 0; i < size; i++) {
183            m.remove(new HashCodeInteger(i));
184
185            c.accept(i, m.size());
186        }
187    }
188
189    static final class HashCodeInteger implements Comparable<HashCodeInteger> {
190        final int value;
191
192        final int hashcode;
193
194        HashCodeInteger(int value) {
195            this(value, hash(value));
196        }
197
198        HashCodeInteger(int value, int hashcode) {
199            this.value = value;
200            this.hashcode = hashcode;
201        }
202
203        static int hash(int i) {
204            // Assuming 64 entries with keys from 0 to 63 then a map:
205            // - of capacity 64 will have 4 buckets with 16 entries per-bucket
206            // - of capacity 128 will have 8 buckets with 8 entries per-bucket
207            // - of capacity 256 will have 16 buckets with 4 entries per-bucket
208            //
209            // Re-sizing will result in re-distribution, doubling the buckets
210            // and reducing the entries by half. This will result in
211            // untreeifying when the number of entries is less than untreeify
212            // threshold (see HashMap/ConcurrentMap.UNTREEIFY_THRESHOLD)
213            return (i % 4) + (i / 4) * INITIAL_CAPACITY;
214        }
215
216        @Override
217        public boolean equals(Object obj) {
218            if (obj instanceof HashCodeInteger) {
219                HashCodeInteger other = (HashCodeInteger) obj;
220                return other.value == value;
221            }
222            return false;
223        }
224
225        @Override
226        public int hashCode() {
227            return hashcode;
228        }
229
230        @Override
231        public int compareTo(HashCodeInteger o) {
232            return value - o.value;
233        }
234
235        @Override
236        public String toString() {
237            return Integer.toString(value);
238        }
239    }
240}
241