/*
 * Copyright (C) 2012 The Guava Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.common.collect;

import static java.util.Objects.requireNonNull;

import com.google.common.annotations.GwtIncompatible;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
import java.util.Spliterator;
import java.util.Spliterators;
import javax.annotation.CheckForNull;
import org.checkerframework.checker.nullness.qual.Nullable;

32 /**
33  * CompactLinkedHashSet is an implementation of a Set, which a predictable iteration order that
34  * matches the insertion order. All optional operations (adding and removing) are supported. All
35  * elements, including {@code null}, are permitted.
36  *
37  * <p>{@code contains(x)}, {@code add(x)} and {@code remove(x)}, are all (expected and amortized)
38  * constant time operations. Expected in the hashtable sense (depends on the hash function doing a
39  * good job of distributing the elements to the buckets to a distribution not far from uniform), and
40  * amortized since some operations can trigger a hash table resize.
41  *
42  * <p>This implementation consumes significantly less memory than {@code java.util.LinkedHashSet} or
43  * even {@code java.util.HashSet}, and places considerably less load on the garbage collector. Like
44  * {@code java.util.LinkedHashSet}, it offers insertion-order iteration, with identical behavior.
45  *
46  * <p>This class should not be assumed to be universally superior to {@code
47  * java.util.LinkedHashSet}. Generally speaking, this class reduces object allocation and memory
48  * consumption at the price of moderately increased constant factors of CPU. Only use this class
49  * when there is a specific reason to prioritize memory over CPU.
50  *
51  * @author Louis Wasserman
52  */
53 @GwtIncompatible // not worth using in GWT for now
54 @ElementTypesAreNonnullByDefault
55 class CompactLinkedHashSet<E extends @Nullable Object> extends CompactHashSet<E> {
56 
57   /** Creates an empty {@code CompactLinkedHashSet} instance. */
create()58   public static <E extends @Nullable Object> CompactLinkedHashSet<E> create() {
59     return new CompactLinkedHashSet<>();
60   }
61 
62   /**
63    * Creates a <i>mutable</i> {@code CompactLinkedHashSet} instance containing the elements of the
64    * given collection in the order returned by the collection's iterator.
65    *
66    * @param collection the elements that the set should contain
67    * @return a new {@code CompactLinkedHashSet} containing those elements (minus duplicates)
68    */
create( Collection<? extends E> collection)69   public static <E extends @Nullable Object> CompactLinkedHashSet<E> create(
70       Collection<? extends E> collection) {
71     CompactLinkedHashSet<E> set = createWithExpectedSize(collection.size());
72     set.addAll(collection);
73     return set;
74   }
75 
76   /**
77    * Creates a {@code CompactLinkedHashSet} instance containing the given elements in unspecified
78    * order.
79    *
80    * @param elements the elements that the set should contain
81    * @return a new {@code CompactLinkedHashSet} containing those elements (minus duplicates)
82    */
83   @SafeVarargs
create(E... elements)84   public static <E extends @Nullable Object> CompactLinkedHashSet<E> create(E... elements) {
85     CompactLinkedHashSet<E> set = createWithExpectedSize(elements.length);
86     Collections.addAll(set, elements);
87     return set;
88   }
89 
90   /**
91    * Creates a {@code CompactLinkedHashSet} instance, with a high enough "initial capacity" that it
92    * <i>should</i> hold {@code expectedSize} elements without rebuilding internal data structures.
93    *
94    * @param expectedSize the number of elements you expect to add to the returned set
95    * @return a new, empty {@code CompactLinkedHashSet} with enough capacity to hold {@code
96    *     expectedSize} elements without resizing
97    * @throws IllegalArgumentException if {@code expectedSize} is negative
98    */
createWithExpectedSize( int expectedSize)99   public static <E extends @Nullable Object> CompactLinkedHashSet<E> createWithExpectedSize(
100       int expectedSize) {
101     return new CompactLinkedHashSet<>(expectedSize);
102   }
103 
104   private static final int ENDPOINT = -2;
105 
106   // TODO(user): predecessors and successors should be collocated (reducing cache misses).
107   // Might also explore collocating all of [hash, next, predecessor, successor] fields of an
108   // entry in a *single* long[], though that reduces the maximum size of the set by a factor of 2
109 
110   /**
111    * Pointer to the predecessor of an entry in insertion order. ENDPOINT indicates a node is the
112    * first node in insertion order; all values at indices ≥ {@link #size()} are UNSET.
113    */
114   @CheckForNull private transient int[] predecessor;
115 
116   /**
117    * Pointer to the successor of an entry in insertion order. ENDPOINT indicates a node is the last
118    * node in insertion order; all values at indices ≥ {@link #size()} are UNSET.
119    */
120   @CheckForNull private transient int[] successor;
121 
122   /** Pointer to the first node in the linked list, or {@code ENDPOINT} if there are no entries. */
123   private transient int firstEntry;
124 
125   /** Pointer to the last node in the linked list, or {@code ENDPOINT} if there are no entries. */
126   private transient int lastEntry;
127 
CompactLinkedHashSet()128   CompactLinkedHashSet() {
129     super();
130   }
131 
CompactLinkedHashSet(int expectedSize)132   CompactLinkedHashSet(int expectedSize) {
133     super(expectedSize);
134   }
135 
136   @Override
init(int expectedSize)137   void init(int expectedSize) {
138     super.init(expectedSize);
139     this.firstEntry = ENDPOINT;
140     this.lastEntry = ENDPOINT;
141   }
142 
143   @Override
allocArrays()144   int allocArrays() {
145     int expectedSize = super.allocArrays();
146     this.predecessor = new int[expectedSize];
147     this.successor = new int[expectedSize];
148     return expectedSize;
149   }
150 
151   @Override
152   @CanIgnoreReturnValue
convertToHashFloodingResistantImplementation()153   Set<E> convertToHashFloodingResistantImplementation() {
154     Set<E> result = super.convertToHashFloodingResistantImplementation();
155     this.predecessor = null;
156     this.successor = null;
157     return result;
158   }
159 
160   /*
161    * For discussion of the safety of the following methods for operating on predecessors and
162    * successors, see the comments near the end of CompactHashMap, noting that the methods here call
163    * requirePredecessors() and requireSuccessors(), which are defined at the end of this file.
164    */
165 
getPredecessor(int entry)166   private int getPredecessor(int entry) {
167     return requirePredecessors()[entry] - 1;
168   }
169 
170   @Override
getSuccessor(int entry)171   int getSuccessor(int entry) {
172     return requireSuccessors()[entry] - 1;
173   }
174 
setSuccessor(int entry, int succ)175   private void setSuccessor(int entry, int succ) {
176     requireSuccessors()[entry] = succ + 1;
177   }
178 
setPredecessor(int entry, int pred)179   private void setPredecessor(int entry, int pred) {
180     requirePredecessors()[entry] = pred + 1;
181   }
182 
setSucceeds(int pred, int succ)183   private void setSucceeds(int pred, int succ) {
184     if (pred == ENDPOINT) {
185       firstEntry = succ;
186     } else {
187       setSuccessor(pred, succ);
188     }
189 
190     if (succ == ENDPOINT) {
191       lastEntry = pred;
192     } else {
193       setPredecessor(succ, pred);
194     }
195   }
196 
197   @Override
insertEntry(int entryIndex, @ParametricNullness E object, int hash, int mask)198   void insertEntry(int entryIndex, @ParametricNullness E object, int hash, int mask) {
199     super.insertEntry(entryIndex, object, hash, mask);
200     setSucceeds(lastEntry, entryIndex);
201     setSucceeds(entryIndex, ENDPOINT);
202   }
203 
204   @Override
moveLastEntry(int dstIndex, int mask)205   void moveLastEntry(int dstIndex, int mask) {
206     int srcIndex = size() - 1;
207     super.moveLastEntry(dstIndex, mask);
208 
209     setSucceeds(getPredecessor(dstIndex), getSuccessor(dstIndex));
210     if (dstIndex < srcIndex) {
211       setSucceeds(getPredecessor(srcIndex), dstIndex);
212       setSucceeds(dstIndex, getSuccessor(srcIndex));
213     }
214     requirePredecessors()[srcIndex] = 0;
215     requireSuccessors()[srcIndex] = 0;
216   }
217 
218   @Override
resizeEntries(int newCapacity)219   void resizeEntries(int newCapacity) {
220     super.resizeEntries(newCapacity);
221     predecessor = Arrays.copyOf(requirePredecessors(), newCapacity);
222     successor = Arrays.copyOf(requireSuccessors(), newCapacity);
223   }
224 
225   @Override
firstEntryIndex()226   int firstEntryIndex() {
227     return firstEntry;
228   }
229 
230   @Override
adjustAfterRemove(int indexBeforeRemove, int indexRemoved)231   int adjustAfterRemove(int indexBeforeRemove, int indexRemoved) {
232     return (indexBeforeRemove >= size()) ? indexRemoved : indexBeforeRemove;
233   }
234 
235   @Override
toArray()236   public @Nullable Object[] toArray() {
237     return ObjectArrays.toArrayImpl(this);
238   }
239 
240   @Override
241   @SuppressWarnings("nullness") // b/192354773 in our checker affects toArray declarations
toArray(T[] a)242   public <T extends @Nullable Object> T[] toArray(T[] a) {
243     return ObjectArrays.toArrayImpl(this, a);
244   }
245 
246   @Override
spliterator()247   public Spliterator<E> spliterator() {
248     return Spliterators.spliterator(this, Spliterator.ORDERED | Spliterator.DISTINCT);
249   }
250 
251   @Override
clear()252   public void clear() {
253     if (needsAllocArrays()) {
254       return;
255     }
256     this.firstEntry = ENDPOINT;
257     this.lastEntry = ENDPOINT;
258     // Either both arrays are null or neither is, but we check both to satisfy the nullness checker.
259     if (predecessor != null && successor != null) {
260       Arrays.fill(predecessor, 0, size(), 0);
261       Arrays.fill(successor, 0, size(), 0);
262     }
263     super.clear();
264   }
265 
266   /*
267    * For discussion of the safety of the following methods, see the comments near the end of
268    * CompactHashMap.
269    */
270 
requirePredecessors()271   private int[] requirePredecessors() {
272     return requireNonNull(predecessor);
273   }
274 
requireSuccessors()275   private int[] requireSuccessors() {
276     return requireNonNull(successor);
277   }
278 
279   /*
280    * We don't define getPredecessor+getSuccessor and setPredecessor+setSuccessor here because
281    * they're defined above -- including logic to add and subtract 1 to map between the values stored
282    * in the predecessor/successor arrays and the indexes in the elements array that they identify.
283    */
284 }
285