/*
 * Copyright (C) 2009 The Guava Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */

package com.google.common.collect;

import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.collect.CollectPreconditions.checkRemove;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Equivalence;
import com.google.common.base.Ticker;
import com.google.common.collect.GenericMapMaker.NullListener;
import com.google.common.collect.MapMaker.RemovalCause;
import com.google.common.collect.MapMaker.RemovalListener;
import com.google.common.collect.MapMaker.RemovalNotification;
import com.google.common.primitives.Ints;

import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.lang.ref.WeakReference;
import java.util.AbstractCollection;
import java.util.AbstractMap;
import java.util.AbstractQueue;
import java.util.AbstractSet;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.concurrent.locks.ReentrantLock;
import java.util.logging.Level;
import java.util.logging.Logger;

import javax.annotation.Nullable;
import javax.annotation.concurrent.GuardedBy;

/**
 * The concurrent hash map implementation built by {@link MapMaker}.
 *
 * <p>This implementation is heavily derived from revision 1.96 of <a
 * href="http://tinyurl.com/ConcurrentHashMap">ConcurrentHashMap.java</a>.
 *
 * @author Bob Lee
 * @author Charles Fry
 * @author Doug Lea ({@code ConcurrentHashMap})
 */
class MapMakerInternalMap<K, V>
    extends AbstractMap<K, V> implements ConcurrentMap<K, V>, Serializable {

  /*
   * The basic strategy is to subdivide the table among Segments, each of which itself is a
   * concurrently readable hash table. The map supports non-blocking reads and concurrent writes
   * across different segments.
   *
   * If a maximum size is specified, a best-effort bounding is performed per segment, using a
   * page-replacement algorithm to determine which entries to evict when the capacity has been
   * exceeded.
   *
   * The page replacement algorithm's data structures are kept eventually consistent with the map.
   * The ordering of writes to a segment is sequentially consistent. An update to the map and
   * recording of reads may not be immediately reflected on the algorithm's data structures. These
   * structures are guarded by a lock and operations are applied in batches to avoid lock
   * contention. The penalty of applying the batches is spread across threads so that the amortized
   * cost is slightly higher than performing just the operation without enforcing the capacity
   * constraint.
   *
   * This implementation uses a per-segment queue to record a memento of the additions, removals,
   * and accesses that were performed on the map. The queue is drained on writes and when it
   * exceeds its capacity threshold.
   *
   * The Least Recently Used page replacement algorithm was chosen due to its simplicity, high hit
   * rate, and ability to be implemented with O(1) time complexity. The initial LRU implementation
   * operates per-segment rather than globally for increased implementation simplicity. We expect
   * the cache hit rate to be similar to that of a global LRU algorithm.
   */
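  /*
   * Illustrative sketch only (not members of this class): the drain-on-threshold strategy
   * described above amounts to a per-segment read counter checked against the DRAIN_THRESHOLD
   * mask defined below. The names readCount and runCleanup are placeholders:
   *
   *   if ((readCount.incrementAndGet() & DRAIN_THRESHOLD) == 0) {
   *     runCleanup(); // acquire the segment lock and apply buffered recency/eviction updates
   *   }
   */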
  // Constants

  /**
   * The maximum capacity, used if a higher value is implicitly specified by either of the
   * constructors with arguments. MUST be a power of two <= 1<<30 to ensure that entries are
   * indexable using ints.
   */
  static final int MAXIMUM_CAPACITY = Ints.MAX_POWER_OF_TWO;

  /** The maximum number of segments to allow; used to bound constructor arguments. */
  static final int MAX_SEGMENTS = 1 << 16; // slightly conservative

  /** Number of (unsynchronized) retries in the containsValue method. */
  static final int CONTAINS_VALUE_RETRIES = 3;

  /**
   * Number of cache access operations that can be buffered per segment before the cache's recency
   * ordering information is updated. This is used to avoid lock contention by recording a memento
   * of reads and delaying a lock acquisition until the threshold is crossed or a mutation occurs.
   *
   * <p>This must be a (2^n)-1 as it is used as a mask.
   */
  static final int DRAIN_THRESHOLD = 0x3F;

  /**
   * Maximum number of entries to be drained in a single cleanup run. This applies independently to
   * the cleanup queue and both reference queues.
   */
  // TODO(fry): empirically optimize this
  static final int DRAIN_MAX = 16;

  static final long CLEANUP_EXECUTOR_DELAY_SECS = 60;

  // Fields

  private static final Logger logger = Logger.getLogger(MapMakerInternalMap.class.getName());

  /**
   * Mask value for indexing into segments. The upper bits of a key's hash code are used to choose
   * the segment.
   */
  final transient int segmentMask;

  /**
   * Shift value for indexing within segments. Helps prevent entries that end up in the same
   * segment from also ending up in the same bucket.
   */
  final transient int segmentShift;

  /** The segments, each of which is a specialized hash table. */
  final transient Segment<K, V>[] segments;

  /** The concurrency level. */
  final int concurrencyLevel;

  /** Strategy for comparing keys. */
  final Equivalence<Object> keyEquivalence;

  /** Strategy for comparing values. */
  final Equivalence<Object> valueEquivalence;

  /** Strategy for referencing keys. */
  final Strength keyStrength;

  /** Strategy for referencing values. */
  final Strength valueStrength;

  /** The maximum size of this map. MapMaker.UNSET_INT if there is no maximum. */
  final int maximumSize;
  /** How long after the last access to an entry the map will retain that entry. */
  final long expireAfterAccessNanos;

  /** How long after the last write to an entry the map will retain that entry. */
  final long expireAfterWriteNanos;

  /** Entries waiting to be consumed by the removal listener. */
  // TODO(fry): define a new type which creates event objects and automates the clear logic
  final Queue<RemovalNotification<K, V>> removalNotificationQueue;

  /**
   * A listener that is invoked when an entry is removed due to expiration or garbage collection of
   * soft/weak entries.
   */
  final RemovalListener<K, V> removalListener;

  /** Factory used to create new entries. */
  final transient EntryFactory entryFactory;

  /** Measures time in a testable way. */
  final Ticker ticker;
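  /*
   * For orientation only: instances of this class are normally obtained through MapMaker rather
   * than constructed directly, along the lines of the following illustrative snippet (Key and
   * Value are placeholder types):
   *
   *   ConcurrentMap<Key, Value> map = new MapMaker()
   *       .concurrencyLevel(4)
   *       .weakKeys()
   *       .makeMap();
   */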
  /**
   * Creates a new, empty map with the specified strategy, initial capacity and concurrency level.
   */
  MapMakerInternalMap(MapMaker builder) {
    concurrencyLevel = Math.min(builder.getConcurrencyLevel(), MAX_SEGMENTS);

    keyStrength = builder.getKeyStrength();
    valueStrength = builder.getValueStrength();

    keyEquivalence = builder.getKeyEquivalence();
    valueEquivalence = valueStrength.defaultEquivalence();

    maximumSize = builder.maximumSize;
    expireAfterAccessNanos = builder.getExpireAfterAccessNanos();
    expireAfterWriteNanos = builder.getExpireAfterWriteNanos();

    entryFactory = EntryFactory.getFactory(keyStrength, expires(), evictsBySize());
    ticker = builder.getTicker();

    removalListener = builder.getRemovalListener();
    removalNotificationQueue = (removalListener == NullListener.INSTANCE)
        ? MapMakerInternalMap.<RemovalNotification<K, V>>discardingQueue()
        : new ConcurrentLinkedQueue<RemovalNotification<K, V>>();

    int initialCapacity = Math.min(builder.getInitialCapacity(), MAXIMUM_CAPACITY);
    if (evictsBySize()) {
      initialCapacity = Math.min(initialCapacity, maximumSize);
    }

    // Find power-of-two sizes best matching arguments. Constraints:
    // (segmentCount <= maximumSize)
    // && (concurrencyLevel > maximumSize || segmentCount > concurrencyLevel)
    int segmentShift = 0;
    int segmentCount = 1;
    while (segmentCount < concurrencyLevel
        && (!evictsBySize() || segmentCount * 2 <= maximumSize)) {
      ++segmentShift;
      segmentCount <<= 1;
    }
    this.segmentShift = 32 - segmentShift;
    segmentMask = segmentCount - 1;

    this.segments = newSegmentArray(segmentCount);

    int segmentCapacity = initialCapacity / segmentCount;
    if (segmentCapacity * segmentCount < initialCapacity) {
      ++segmentCapacity;
    }

    int segmentSize = 1;
    while (segmentSize < segmentCapacity) {
      segmentSize <<= 1;
    }

    if (evictsBySize()) {
      // Ensure sum of segment max sizes = overall max size
      int maximumSegmentSize = maximumSize / segmentCount + 1;
      int remainder = maximumSize % segmentCount;
      for (int i = 0; i < this.segments.length; ++i) {
        if (i == remainder) {
          maximumSegmentSize--;
        }
        this.segments[i] = createSegment(segmentSize, maximumSegmentSize);
      }
    } else {
      for (int i = 0; i < this.segments.length; ++i) {
        this.segments[i] = createSegment(segmentSize, MapMaker.UNSET_INT);
      }
    }
  }

  boolean evictsBySize() {
    return maximumSize != MapMaker.UNSET_INT;
  }

  boolean expires() {
    return expiresAfterWrite() || expiresAfterAccess();
  }

  boolean expiresAfterWrite() {
    return expireAfterWriteNanos > 0;
  }

  boolean expiresAfterAccess() {
    return expireAfterAccessNanos > 0;
  }

  boolean usesKeyReferences() {
    return keyStrength != Strength.STRONG;
  }

  boolean usesValueReferences() {
    return valueStrength != Strength.STRONG;
  }

  enum Strength {
    /*
     * TODO(kevinb): If we strongly reference the value and aren't computing, we needn't wrap the
     * value. This could save ~8 bytes per entry.
     */

    STRONG {
      @Override
      <K, V> ValueReference<K, V> referenceValue(
          Segment<K, V> segment, ReferenceEntry<K, V> entry, V value) {
        return new StrongValueReference<K, V>(value);
      }

      @Override
      Equivalence<Object> defaultEquivalence() {
        return Equivalence.equals();
      }
    },

    SOFT {
      @Override
      <K, V> ValueReference<K, V> referenceValue(
          Segment<K, V> segment, ReferenceEntry<K, V> entry, V value) {
        return new SoftValueReference<K, V>(segment.valueReferenceQueue, value, entry);
      }

      @Override
      Equivalence<Object> defaultEquivalence() {
        return Equivalence.identity();
      }
    },

    WEAK {
      @Override
      <K, V> ValueReference<K, V> referenceValue(
          Segment<K, V> segment, ReferenceEntry<K, V> entry, V value) {
        return new WeakValueReference<K, V>(segment.valueReferenceQueue, value, entry);
      }

      @Override
      Equivalence<Object> defaultEquivalence() {
        return Equivalence.identity();
      }
    };

    /**
     * Creates a reference for the given value according to this value strength.
     */
    abstract <K, V> ValueReference<K, V> referenceValue(
        Segment<K, V> segment, ReferenceEntry<K, V> entry, V value);

    /**
     * Returns the default equivalence strategy used to compare and hash keys or values referenced
     * at this strength. This strategy will be used unless the user explicitly specifies an
     * alternate strategy.
     */
    abstract Equivalence<Object> defaultEquivalence();
  }

  /**
   * Creates new entries.
   */
  enum EntryFactory {
    STRONG {
      @Override
      <K, V> ReferenceEntry<K, V> newEntry(
          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
        return new StrongEntry<K, V>(key, hash, next);
      }
    },
    STRONG_EXPIRABLE {
      @Override
      <K, V> ReferenceEntry<K, V> newEntry(
          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
        return new StrongExpirableEntry<K, V>(key, hash, next);
      }

      @Override
      <K, V> ReferenceEntry<K, V> copyEntry(
          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
        copyExpirableEntry(original, newEntry);
        return newEntry;
      }
    },
    STRONG_EVICTABLE {
      @Override
      <K, V> ReferenceEntry<K, V> newEntry(
          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
        return new StrongEvictableEntry<K, V>(key, hash, next);
      }

      @Override
      <K, V> ReferenceEntry<K, V> copyEntry(
          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
        copyEvictableEntry(original, newEntry);
        return newEntry;
      }
    },
    STRONG_EXPIRABLE_EVICTABLE {
      @Override
      <K, V> ReferenceEntry<K, V> newEntry(
          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
        return new StrongExpirableEvictableEntry<K, V>(key, hash, next);
      }

      @Override
      <K, V> ReferenceEntry<K, V> copyEntry(
          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
        copyExpirableEntry(original, newEntry);
        copyEvictableEntry(original, newEntry);
        return newEntry;
      }
    },

    WEAK {
      @Override
      <K, V> ReferenceEntry<K, V> newEntry(
          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
        return new WeakEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
      }
    },
    WEAK_EXPIRABLE {
      @Override
      <K, V> ReferenceEntry<K, V> newEntry(
          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
        return new WeakExpirableEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
      }

      @Override
      <K, V> ReferenceEntry<K, V> copyEntry(
          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
        copyExpirableEntry(original, newEntry);
        return newEntry;
      }
    },
    WEAK_EVICTABLE {
      @Override
      <K, V> ReferenceEntry<K, V> newEntry(
          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
        return new WeakEvictableEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
      }

      @Override
      <K, V> ReferenceEntry<K, V> copyEntry(
          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
        copyEvictableEntry(original, newEntry);
        return newEntry;
      }
    },
    WEAK_EXPIRABLE_EVICTABLE {
      @Override
      <K, V> ReferenceEntry<K, V> newEntry(
          Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
        return new WeakExpirableEvictableEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
      }

      @Override
      <K, V> ReferenceEntry<K, V> copyEntry(
          Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
        copyExpirableEntry(original, newEntry);
        copyEvictableEntry(original, newEntry);
        return newEntry;
      }
    };

    /**
     * Masks used to compute indices in the following table.
     */
    static final int EXPIRABLE_MASK = 1;
    static final int EVICTABLE_MASK = 2;

    /**
     * Look-up table for factories. First dimension is the reference type. The second dimension is
     * the result of OR-ing the feature masks.
     */
    static final EntryFactory[][] factories = {
      { STRONG, STRONG_EXPIRABLE, STRONG_EVICTABLE, STRONG_EXPIRABLE_EVICTABLE },
      {}, // no support for SOFT keys
      { WEAK, WEAK_EXPIRABLE, WEAK_EVICTABLE, WEAK_EXPIRABLE_EVICTABLE }
    };

    static EntryFactory getFactory(Strength keyStrength, boolean expireAfterWrite,
        boolean evictsBySize) {
      int flags = (expireAfterWrite ? EXPIRABLE_MASK : 0) | (evictsBySize ? EVICTABLE_MASK : 0);
      return factories[keyStrength.ordinal()][flags];
    }
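    /*
     * Worked example of the table lookup above: getFactory(Strength.WEAK, true, false) computes
     * flags = EXPIRABLE_MASK = 1 and returns factories[Strength.WEAK.ordinal()][1], i.e.
     * WEAK_EXPIRABLE. The empty middle row is never reached because soft keys are not supported.
     */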
    /**
     * Creates a new entry.
     *
     * @param segment to create the entry for
     * @param key of the entry
     * @param hash of the key
     * @param next entry in the same bucket
     */
    abstract <K, V> ReferenceEntry<K, V> newEntry(
        Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next);

    /**
     * Copies an entry, assigning it a new {@code next} entry.
     *
     * @param original the entry to copy
     * @param newNext entry in the same bucket
     */
    // Guarded By Segment.this
    <K, V> ReferenceEntry<K, V> copyEntry(
        Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
      return newEntry(segment, original.getKey(), original.getHash(), newNext);
    }

    // Guarded By Segment.this
    <K, V> void copyExpirableEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry) {
      // TODO(fry): when we link values instead of entries this method can go
      // away, as can connectExpirables, nullifyExpirable.
      newEntry.setExpirationTime(original.getExpirationTime());

      connectExpirables(original.getPreviousExpirable(), newEntry);
      connectExpirables(newEntry, original.getNextExpirable());

      nullifyExpirable(original);
    }

    // Guarded By Segment.this
    <K, V> void copyEvictableEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry) {
      // TODO(fry): when we link values instead of entries this method can go
      // away, as can connectEvictables, nullifyEvictable.
      connectEvictables(original.getPreviousEvictable(), newEntry);
      connectEvictables(newEntry, original.getNextEvictable());

      nullifyEvictable(original);
    }
  }

  /**
   * A reference to a value.
   */
  interface ValueReference<K, V> {
    /**
     * Gets the value. Does not block or throw exceptions.
     */
    V get();

    /**
     * Waits for a value that may still be computing. Unlike get(), this method can block (in the
     * case of FutureValueReference).
     *
     * @throws ExecutionException if the computing thread throws an exception
     */
    V waitForValue() throws ExecutionException;

    /**
     * Returns the entry associated with this value reference, or {@code null} if this value
     * reference is independent of any entry.
     */
    ReferenceEntry<K, V> getEntry();

    /**
     * Creates a copy of this reference for the given entry.
     *
     * <p>{@code value} may be null only for a loading reference.
     */
    ValueReference<K, V> copyFor(
        ReferenceQueue<V> queue, @Nullable V value, ReferenceEntry<K, V> entry);

    /**
     * Clears this reference object.
     *
     * @param newValue the new value reference which will replace this one; this is only used
     *     during computation to immediately notify blocked threads of the new value
     */
    void clear(@Nullable ValueReference<K, V> newValue);

    /**
     * Returns {@code true} if the value type is a computing reference (regardless of whether or
     * not computation has completed).
     * This is necessary to distinguish between partially-collected entries and computing entries,
     * which need to be cleaned up differently.
     */
    boolean isComputingReference();
  }

  /**
   * Placeholder. Indicates that the value hasn't been set yet.
   */
  static final ValueReference<Object, Object> UNSET = new ValueReference<Object, Object>() {
    @Override
    public Object get() {
      return null;
    }

    @Override
    public ReferenceEntry<Object, Object> getEntry() {
      return null;
    }

    @Override
    public ValueReference<Object, Object> copyFor(ReferenceQueue<Object> queue,
        @Nullable Object value, ReferenceEntry<Object, Object> entry) {
      return this;
    }

    @Override
    public boolean isComputingReference() {
      return false;
    }

    @Override
    public Object waitForValue() {
      return null;
    }

    @Override
    public void clear(ValueReference<Object, Object> newValue) {}
  };

  /**
   * Singleton placeholder that indicates a value is being computed.
   */
  @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
  static <K, V> ValueReference<K, V> unset() {
    return (ValueReference<K, V>) UNSET;
  }

  /**
   * An entry in a reference map.
   *
   * Entries in the map can be in the following states:
   *
   * Valid:
   * - Live: valid key/value are set
   * - Computing: computation is pending
   *
   * Invalid:
   * - Expired: time expired (key/value may still be set)
   * - Collected: key/value was partially collected, but not yet cleaned up
   */
  interface ReferenceEntry<K, V> {
    /**
     * Gets the value reference from this entry.
     */
    ValueReference<K, V> getValueReference();

    /**
     * Sets the value reference for this entry.
     */
    void setValueReference(ValueReference<K, V> valueReference);

    /**
     * Gets the next entry in the chain.
     */
    ReferenceEntry<K, V> getNext();

    /**
     * Gets the entry's hash.
     */
    int getHash();

    /**
     * Gets the key for this entry.
     */
    K getKey();

    /*
     * Used by entries that are expirable. Expirable entries are maintained in a doubly-linked
     * list. New entries are added at the tail of the list at write time; stale entries are
     * expired from the head of the list.
     */

    /**
     * Gets the entry expiration time in ns.
     */
    long getExpirationTime();

    /**
     * Sets the entry expiration time in ns.
     */
    void setExpirationTime(long time);

    /**
     * Gets the next entry in the recency list.
     */
    ReferenceEntry<K, V> getNextExpirable();

    /**
     * Sets the next entry in the recency list.
     */
    void setNextExpirable(ReferenceEntry<K, V> next);

    /**
     * Gets the previous entry in the recency list.
     */
    ReferenceEntry<K, V> getPreviousExpirable();

    /**
     * Sets the previous entry in the recency list.
     */
    void setPreviousExpirable(ReferenceEntry<K, V> previous);

    /*
     * Implemented by entries that are evictable. Evictable entries are maintained in a
     * doubly-linked list. New entries are added at the tail of the list at write time and stale
     * entries are expired from the head of the list.
     */
    /**
     * Gets the next entry in the recency list.
     */
    ReferenceEntry<K, V> getNextEvictable();

    /**
     * Sets the next entry in the recency list.
     */
    void setNextEvictable(ReferenceEntry<K, V> next);

    /**
     * Gets the previous entry in the recency list.
     */
    ReferenceEntry<K, V> getPreviousEvictable();

    /**
     * Sets the previous entry in the recency list.
     */
    void setPreviousEvictable(ReferenceEntry<K, V> previous);
  }

  private enum NullEntry implements ReferenceEntry<Object, Object> {
    INSTANCE;

    @Override
    public ValueReference<Object, Object> getValueReference() {
      return null;
    }

    @Override
    public void setValueReference(ValueReference<Object, Object> valueReference) {}

    @Override
    public ReferenceEntry<Object, Object> getNext() {
      return null;
    }

    @Override
    public int getHash() {
      return 0;
    }

    @Override
    public Object getKey() {
      return null;
    }

    @Override
    public long getExpirationTime() {
      return 0;
    }

    @Override
    public void setExpirationTime(long time) {}

    @Override
    public ReferenceEntry<Object, Object> getNextExpirable() {
      return this;
    }

    @Override
    public void setNextExpirable(ReferenceEntry<Object, Object> next) {}

    @Override
    public ReferenceEntry<Object, Object> getPreviousExpirable() {
      return this;
    }

    @Override
    public void setPreviousExpirable(ReferenceEntry<Object, Object> previous) {}

    @Override
    public ReferenceEntry<Object, Object> getNextEvictable() {
      return this;
    }

    @Override
    public void setNextEvictable(ReferenceEntry<Object, Object> next) {}

    @Override
    public ReferenceEntry<Object, Object> getPreviousEvictable() {
      return this;
    }

    @Override
    public void setPreviousEvictable(ReferenceEntry<Object, Object> previous) {}
  }

  abstract static class AbstractReferenceEntry<K, V> implements ReferenceEntry<K, V> {
    @Override
    public ValueReference<K, V> getValueReference() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setValueReference(ValueReference<K, V> valueReference) {
      throw new UnsupportedOperationException();
    }

    @Override
    public ReferenceEntry<K, V> getNext() {
      throw new UnsupportedOperationException();
    }

    @Override
    public int getHash() {
      throw new UnsupportedOperationException();
    }

    @Override
    public K getKey() {
      throw new UnsupportedOperationException();
    }

    @Override
    public long getExpirationTime() {
      throw new UnsupportedOperationException();
    }
    @Override
    public void setExpirationTime(long time) {
      throw new UnsupportedOperationException();
    }

    @Override
    public ReferenceEntry<K, V> getNextExpirable() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setNextExpirable(ReferenceEntry<K, V> next) {
      throw new UnsupportedOperationException();
    }

    @Override
    public ReferenceEntry<K, V> getPreviousExpirable() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setPreviousExpirable(ReferenceEntry<K, V> previous) {
      throw new UnsupportedOperationException();
    }

    @Override
    public ReferenceEntry<K, V> getNextEvictable() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setNextEvictable(ReferenceEntry<K, V> next) {
      throw new UnsupportedOperationException();
    }

    @Override
    public ReferenceEntry<K, V> getPreviousEvictable() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setPreviousEvictable(ReferenceEntry<K, V> previous) {
      throw new UnsupportedOperationException();
    }
  }

  @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
  static <K, V> ReferenceEntry<K, V> nullEntry() {
    return (ReferenceEntry<K, V>) NullEntry.INSTANCE;
  }

  static final Queue<? extends Object> DISCARDING_QUEUE = new AbstractQueue<Object>() {
    @Override
    public boolean offer(Object o) {
      return true;
    }

    @Override
    public Object peek() {
      return null;
    }

    @Override
    public Object poll() {
      return null;
    }

    @Override
    public int size() {
      return 0;
    }

    @Override
    public Iterator<Object> iterator() {
      return Iterators.emptyIterator();
    }
  };

  /**
   * Queue that discards all elements.
   */
  @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
  static <E> Queue<E> discardingQueue() {
    return (Queue) DISCARDING_QUEUE;
  }

  /*
   * Note: All of this duplicate code sucks, but it saves a lot of memory. If only Java had mixins!
   * To maintain this code, make a change for the strong reference type. Then, cut and paste, and
   * replace "Strong" with "Soft" or "Weak" within the pasted text. The primary difference is that
   * strong entries store the key reference directly while soft and weak entries delegate to their
   * respective superclasses.
   */

  /**
   * Used for strongly-referenced keys.
913 */ 914 static class StrongEntry<K, V> implements ReferenceEntry<K, V> { 915 final K key; 916 StrongEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next)917 StrongEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 918 this.key = key; 919 this.hash = hash; 920 this.next = next; 921 } 922 923 @Override getKey()924 public K getKey() { 925 return this.key; 926 } 927 928 // null expiration 929 930 @Override getExpirationTime()931 public long getExpirationTime() { 932 throw new UnsupportedOperationException(); 933 } 934 935 @Override setExpirationTime(long time)936 public void setExpirationTime(long time) { 937 throw new UnsupportedOperationException(); 938 } 939 940 @Override getNextExpirable()941 public ReferenceEntry<K, V> getNextExpirable() { 942 throw new UnsupportedOperationException(); 943 } 944 945 @Override setNextExpirable(ReferenceEntry<K, V> next)946 public void setNextExpirable(ReferenceEntry<K, V> next) { 947 throw new UnsupportedOperationException(); 948 } 949 950 @Override getPreviousExpirable()951 public ReferenceEntry<K, V> getPreviousExpirable() { 952 throw new UnsupportedOperationException(); 953 } 954 955 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)956 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 957 throw new UnsupportedOperationException(); 958 } 959 960 // null eviction 961 962 @Override getNextEvictable()963 public ReferenceEntry<K, V> getNextEvictable() { 964 throw new UnsupportedOperationException(); 965 } 966 967 @Override setNextEvictable(ReferenceEntry<K, V> next)968 public void setNextEvictable(ReferenceEntry<K, V> next) { 969 throw new UnsupportedOperationException(); 970 } 971 972 @Override getPreviousEvictable()973 public ReferenceEntry<K, V> getPreviousEvictable() { 974 throw new UnsupportedOperationException(); 975 } 976 977 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)978 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 979 throw new UnsupportedOperationException(); 980 } 981 982 // The code below is exactly the same for each entry type. 983 984 final int hash; 985 final ReferenceEntry<K, V> next; 986 volatile ValueReference<K, V> valueReference = unset(); 987 988 @Override getValueReference()989 public ValueReference<K, V> getValueReference() { 990 return valueReference; 991 } 992 993 @Override setValueReference(ValueReference<K, V> valueReference)994 public void setValueReference(ValueReference<K, V> valueReference) { 995 ValueReference<K, V> previous = this.valueReference; 996 this.valueReference = valueReference; 997 previous.clear(valueReference); 998 } 999 1000 @Override getHash()1001 public int getHash() { 1002 return hash; 1003 } 1004 1005 @Override getNext()1006 public ReferenceEntry<K, V> getNext() { 1007 return next; 1008 } 1009 } 1010 1011 static final class StrongExpirableEntry<K, V> extends StrongEntry<K, V> 1012 implements ReferenceEntry<K, V> { StrongExpirableEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next)1013 StrongExpirableEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1014 super(key, hash, next); 1015 } 1016 1017 // The code below is exactly the same for each expirable entry type. 
1018 1019 volatile long time = Long.MAX_VALUE; 1020 1021 @Override getExpirationTime()1022 public long getExpirationTime() { 1023 return time; 1024 } 1025 1026 @Override setExpirationTime(long time)1027 public void setExpirationTime(long time) { 1028 this.time = time; 1029 } 1030 1031 // Guarded By Segment.this 1032 ReferenceEntry<K, V> nextExpirable = nullEntry(); 1033 1034 @Override getNextExpirable()1035 public ReferenceEntry<K, V> getNextExpirable() { 1036 return nextExpirable; 1037 } 1038 1039 @Override setNextExpirable(ReferenceEntry<K, V> next)1040 public void setNextExpirable(ReferenceEntry<K, V> next) { 1041 this.nextExpirable = next; 1042 } 1043 1044 // Guarded By Segment.this 1045 ReferenceEntry<K, V> previousExpirable = nullEntry(); 1046 1047 @Override getPreviousExpirable()1048 public ReferenceEntry<K, V> getPreviousExpirable() { 1049 return previousExpirable; 1050 } 1051 1052 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1053 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1054 this.previousExpirable = previous; 1055 } 1056 } 1057 1058 static final class StrongEvictableEntry<K, V> 1059 extends StrongEntry<K, V> implements ReferenceEntry<K, V> { StrongEvictableEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next)1060 StrongEvictableEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1061 super(key, hash, next); 1062 } 1063 1064 // The code below is exactly the same for each evictable entry type. 1065 1066 // Guarded By Segment.this 1067 ReferenceEntry<K, V> nextEvictable = nullEntry(); 1068 1069 @Override getNextEvictable()1070 public ReferenceEntry<K, V> getNextEvictable() { 1071 return nextEvictable; 1072 } 1073 1074 @Override setNextEvictable(ReferenceEntry<K, V> next)1075 public void setNextEvictable(ReferenceEntry<K, V> next) { 1076 this.nextEvictable = next; 1077 } 1078 1079 // Guarded By Segment.this 1080 ReferenceEntry<K, V> previousEvictable = nullEntry(); 1081 1082 @Override getPreviousEvictable()1083 public ReferenceEntry<K, V> getPreviousEvictable() { 1084 return previousEvictable; 1085 } 1086 1087 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1088 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1089 this.previousEvictable = previous; 1090 } 1091 } 1092 1093 static final class StrongExpirableEvictableEntry<K, V> 1094 extends StrongEntry<K, V> implements ReferenceEntry<K, V> { StrongExpirableEvictableEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next)1095 StrongExpirableEvictableEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1096 super(key, hash, next); 1097 } 1098 1099 // The code below is exactly the same for each expirable entry type. 
1100 1101 volatile long time = Long.MAX_VALUE; 1102 1103 @Override getExpirationTime()1104 public long getExpirationTime() { 1105 return time; 1106 } 1107 1108 @Override setExpirationTime(long time)1109 public void setExpirationTime(long time) { 1110 this.time = time; 1111 } 1112 1113 // Guarded By Segment.this 1114 ReferenceEntry<K, V> nextExpirable = nullEntry(); 1115 1116 @Override getNextExpirable()1117 public ReferenceEntry<K, V> getNextExpirable() { 1118 return nextExpirable; 1119 } 1120 1121 @Override setNextExpirable(ReferenceEntry<K, V> next)1122 public void setNextExpirable(ReferenceEntry<K, V> next) { 1123 this.nextExpirable = next; 1124 } 1125 1126 // Guarded By Segment.this 1127 ReferenceEntry<K, V> previousExpirable = nullEntry(); 1128 1129 @Override getPreviousExpirable()1130 public ReferenceEntry<K, V> getPreviousExpirable() { 1131 return previousExpirable; 1132 } 1133 1134 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1135 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1136 this.previousExpirable = previous; 1137 } 1138 1139 // The code below is exactly the same for each evictable entry type. 1140 1141 // Guarded By Segment.this 1142 ReferenceEntry<K, V> nextEvictable = nullEntry(); 1143 1144 @Override getNextEvictable()1145 public ReferenceEntry<K, V> getNextEvictable() { 1146 return nextEvictable; 1147 } 1148 1149 @Override setNextEvictable(ReferenceEntry<K, V> next)1150 public void setNextEvictable(ReferenceEntry<K, V> next) { 1151 this.nextEvictable = next; 1152 } 1153 1154 // Guarded By Segment.this 1155 ReferenceEntry<K, V> previousEvictable = nullEntry(); 1156 1157 @Override getPreviousEvictable()1158 public ReferenceEntry<K, V> getPreviousEvictable() { 1159 return previousEvictable; 1160 } 1161 1162 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1163 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1164 this.previousEvictable = previous; 1165 } 1166 } 1167 1168 /** 1169 * Used for softly-referenced keys. 
1170 */ 1171 static class SoftEntry<K, V> extends SoftReference<K> implements ReferenceEntry<K, V> { SoftEntry(ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next)1172 SoftEntry(ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1173 super(key, queue); 1174 this.hash = hash; 1175 this.next = next; 1176 } 1177 1178 @Override getKey()1179 public K getKey() { 1180 return get(); 1181 } 1182 1183 // null expiration 1184 @Override getExpirationTime()1185 public long getExpirationTime() { 1186 throw new UnsupportedOperationException(); 1187 } 1188 1189 @Override setExpirationTime(long time)1190 public void setExpirationTime(long time) { 1191 throw new UnsupportedOperationException(); 1192 } 1193 1194 @Override getNextExpirable()1195 public ReferenceEntry<K, V> getNextExpirable() { 1196 throw new UnsupportedOperationException(); 1197 } 1198 1199 @Override setNextExpirable(ReferenceEntry<K, V> next)1200 public void setNextExpirable(ReferenceEntry<K, V> next) { 1201 throw new UnsupportedOperationException(); 1202 } 1203 1204 @Override getPreviousExpirable()1205 public ReferenceEntry<K, V> getPreviousExpirable() { 1206 throw new UnsupportedOperationException(); 1207 } 1208 1209 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1210 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1211 throw new UnsupportedOperationException(); 1212 } 1213 1214 // null eviction 1215 1216 @Override getNextEvictable()1217 public ReferenceEntry<K, V> getNextEvictable() { 1218 throw new UnsupportedOperationException(); 1219 } 1220 1221 @Override setNextEvictable(ReferenceEntry<K, V> next)1222 public void setNextEvictable(ReferenceEntry<K, V> next) { 1223 throw new UnsupportedOperationException(); 1224 } 1225 1226 @Override getPreviousEvictable()1227 public ReferenceEntry<K, V> getPreviousEvictable() { 1228 throw new UnsupportedOperationException(); 1229 } 1230 1231 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1232 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1233 throw new UnsupportedOperationException(); 1234 } 1235 1236 // The code below is exactly the same for each entry type. 1237 1238 final int hash; 1239 final ReferenceEntry<K, V> next; 1240 volatile ValueReference<K, V> valueReference = unset(); 1241 1242 @Override getValueReference()1243 public ValueReference<K, V> getValueReference() { 1244 return valueReference; 1245 } 1246 1247 @Override setValueReference(ValueReference<K, V> valueReference)1248 public void setValueReference(ValueReference<K, V> valueReference) { 1249 ValueReference<K, V> previous = this.valueReference; 1250 this.valueReference = valueReference; 1251 previous.clear(valueReference); 1252 } 1253 1254 @Override getHash()1255 public int getHash() { 1256 return hash; 1257 } 1258 1259 @Override getNext()1260 public ReferenceEntry<K, V> getNext() { 1261 return next; 1262 } 1263 } 1264 1265 static final class SoftExpirableEntry<K, V> 1266 extends SoftEntry<K, V> implements ReferenceEntry<K, V> { SoftExpirableEntry( ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next)1267 SoftExpirableEntry( 1268 ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1269 super(queue, key, hash, next); 1270 } 1271 1272 // The code below is exactly the same for each expirable entry type. 
1273 1274 volatile long time = Long.MAX_VALUE; 1275 1276 @Override getExpirationTime()1277 public long getExpirationTime() { 1278 return time; 1279 } 1280 1281 @Override setExpirationTime(long time)1282 public void setExpirationTime(long time) { 1283 this.time = time; 1284 } 1285 1286 // Guarded By Segment.this 1287 ReferenceEntry<K, V> nextExpirable = nullEntry(); 1288 1289 @Override getNextExpirable()1290 public ReferenceEntry<K, V> getNextExpirable() { 1291 return nextExpirable; 1292 } 1293 1294 @Override setNextExpirable(ReferenceEntry<K, V> next)1295 public void setNextExpirable(ReferenceEntry<K, V> next) { 1296 this.nextExpirable = next; 1297 } 1298 1299 // Guarded By Segment.this 1300 ReferenceEntry<K, V> previousExpirable = nullEntry(); 1301 1302 @Override getPreviousExpirable()1303 public ReferenceEntry<K, V> getPreviousExpirable() { 1304 return previousExpirable; 1305 } 1306 1307 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1308 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1309 this.previousExpirable = previous; 1310 } 1311 } 1312 1313 static final class SoftEvictableEntry<K, V> 1314 extends SoftEntry<K, V> implements ReferenceEntry<K, V> { SoftEvictableEntry( ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next)1315 SoftEvictableEntry( 1316 ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1317 super(queue, key, hash, next); 1318 } 1319 1320 // The code below is exactly the same for each evictable entry type. 1321 1322 // Guarded By Segment.this 1323 ReferenceEntry<K, V> nextEvictable = nullEntry(); 1324 1325 @Override getNextEvictable()1326 public ReferenceEntry<K, V> getNextEvictable() { 1327 return nextEvictable; 1328 } 1329 1330 @Override setNextEvictable(ReferenceEntry<K, V> next)1331 public void setNextEvictable(ReferenceEntry<K, V> next) { 1332 this.nextEvictable = next; 1333 } 1334 1335 // Guarded By Segment.this 1336 ReferenceEntry<K, V> previousEvictable = nullEntry(); 1337 1338 @Override getPreviousEvictable()1339 public ReferenceEntry<K, V> getPreviousEvictable() { 1340 return previousEvictable; 1341 } 1342 1343 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1344 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1345 this.previousEvictable = previous; 1346 } 1347 } 1348 1349 static final class SoftExpirableEvictableEntry<K, V> 1350 extends SoftEntry<K, V> implements ReferenceEntry<K, V> { SoftExpirableEvictableEntry( ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next)1351 SoftExpirableEvictableEntry( 1352 ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1353 super(queue, key, hash, next); 1354 } 1355 1356 // The code below is exactly the same for each expirable entry type. 
1357 1358 volatile long time = Long.MAX_VALUE; 1359 1360 @Override getExpirationTime()1361 public long getExpirationTime() { 1362 return time; 1363 } 1364 1365 @Override setExpirationTime(long time)1366 public void setExpirationTime(long time) { 1367 this.time = time; 1368 } 1369 1370 // Guarded By Segment.this 1371 ReferenceEntry<K, V> nextExpirable = nullEntry(); 1372 1373 @Override getNextExpirable()1374 public ReferenceEntry<K, V> getNextExpirable() { 1375 return nextExpirable; 1376 } 1377 1378 @Override setNextExpirable(ReferenceEntry<K, V> next)1379 public void setNextExpirable(ReferenceEntry<K, V> next) { 1380 this.nextExpirable = next; 1381 } 1382 1383 // Guarded By Segment.this 1384 ReferenceEntry<K, V> previousExpirable = nullEntry(); 1385 1386 @Override getPreviousExpirable()1387 public ReferenceEntry<K, V> getPreviousExpirable() { 1388 return previousExpirable; 1389 } 1390 1391 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1392 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1393 this.previousExpirable = previous; 1394 } 1395 1396 // The code below is exactly the same for each evictable entry type. 1397 1398 // Guarded By Segment.this 1399 ReferenceEntry<K, V> nextEvictable = nullEntry(); 1400 1401 @Override getNextEvictable()1402 public ReferenceEntry<K, V> getNextEvictable() { 1403 return nextEvictable; 1404 } 1405 1406 @Override setNextEvictable(ReferenceEntry<K, V> next)1407 public void setNextEvictable(ReferenceEntry<K, V> next) { 1408 this.nextEvictable = next; 1409 } 1410 1411 // Guarded By Segment.this 1412 ReferenceEntry<K, V> previousEvictable = nullEntry(); 1413 1414 @Override getPreviousEvictable()1415 public ReferenceEntry<K, V> getPreviousEvictable() { 1416 return previousEvictable; 1417 } 1418 1419 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1420 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1421 this.previousEvictable = previous; 1422 } 1423 } 1424 1425 /** 1426 * Used for weakly-referenced keys. 
1427 */ 1428 static class WeakEntry<K, V> extends WeakReference<K> implements ReferenceEntry<K, V> { WeakEntry(ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next)1429 WeakEntry(ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1430 super(key, queue); 1431 this.hash = hash; 1432 this.next = next; 1433 } 1434 1435 @Override getKey()1436 public K getKey() { 1437 return get(); 1438 } 1439 1440 // null expiration 1441 1442 @Override getExpirationTime()1443 public long getExpirationTime() { 1444 throw new UnsupportedOperationException(); 1445 } 1446 1447 @Override setExpirationTime(long time)1448 public void setExpirationTime(long time) { 1449 throw new UnsupportedOperationException(); 1450 } 1451 1452 @Override getNextExpirable()1453 public ReferenceEntry<K, V> getNextExpirable() { 1454 throw new UnsupportedOperationException(); 1455 } 1456 1457 @Override setNextExpirable(ReferenceEntry<K, V> next)1458 public void setNextExpirable(ReferenceEntry<K, V> next) { 1459 throw new UnsupportedOperationException(); 1460 } 1461 1462 @Override getPreviousExpirable()1463 public ReferenceEntry<K, V> getPreviousExpirable() { 1464 throw new UnsupportedOperationException(); 1465 } 1466 1467 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1468 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1469 throw new UnsupportedOperationException(); 1470 } 1471 1472 // null eviction 1473 1474 @Override getNextEvictable()1475 public ReferenceEntry<K, V> getNextEvictable() { 1476 throw new UnsupportedOperationException(); 1477 } 1478 1479 @Override setNextEvictable(ReferenceEntry<K, V> next)1480 public void setNextEvictable(ReferenceEntry<K, V> next) { 1481 throw new UnsupportedOperationException(); 1482 } 1483 1484 @Override getPreviousEvictable()1485 public ReferenceEntry<K, V> getPreviousEvictable() { 1486 throw new UnsupportedOperationException(); 1487 } 1488 1489 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1490 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1491 throw new UnsupportedOperationException(); 1492 } 1493 1494 // The code below is exactly the same for each entry type. 1495 1496 final int hash; 1497 final ReferenceEntry<K, V> next; 1498 volatile ValueReference<K, V> valueReference = unset(); 1499 1500 @Override getValueReference()1501 public ValueReference<K, V> getValueReference() { 1502 return valueReference; 1503 } 1504 1505 @Override setValueReference(ValueReference<K, V> valueReference)1506 public void setValueReference(ValueReference<K, V> valueReference) { 1507 ValueReference<K, V> previous = this.valueReference; 1508 this.valueReference = valueReference; 1509 previous.clear(valueReference); 1510 } 1511 1512 @Override getHash()1513 public int getHash() { 1514 return hash; 1515 } 1516 1517 @Override getNext()1518 public ReferenceEntry<K, V> getNext() { 1519 return next; 1520 } 1521 } 1522 1523 static final class WeakExpirableEntry<K, V> 1524 extends WeakEntry<K, V> implements ReferenceEntry<K, V> { WeakExpirableEntry( ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next)1525 WeakExpirableEntry( 1526 ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1527 super(queue, key, hash, next); 1528 } 1529 1530 // The code below is exactly the same for each expirable entry type. 
1531 1532 volatile long time = Long.MAX_VALUE; 1533 1534 @Override getExpirationTime()1535 public long getExpirationTime() { 1536 return time; 1537 } 1538 1539 @Override setExpirationTime(long time)1540 public void setExpirationTime(long time) { 1541 this.time = time; 1542 } 1543 1544 // Guarded By Segment.this 1545 ReferenceEntry<K, V> nextExpirable = nullEntry(); 1546 1547 @Override getNextExpirable()1548 public ReferenceEntry<K, V> getNextExpirable() { 1549 return nextExpirable; 1550 } 1551 1552 @Override setNextExpirable(ReferenceEntry<K, V> next)1553 public void setNextExpirable(ReferenceEntry<K, V> next) { 1554 this.nextExpirable = next; 1555 } 1556 1557 // Guarded By Segment.this 1558 ReferenceEntry<K, V> previousExpirable = nullEntry(); 1559 1560 @Override getPreviousExpirable()1561 public ReferenceEntry<K, V> getPreviousExpirable() { 1562 return previousExpirable; 1563 } 1564 1565 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1566 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1567 this.previousExpirable = previous; 1568 } 1569 } 1570 1571 static final class WeakEvictableEntry<K, V> 1572 extends WeakEntry<K, V> implements ReferenceEntry<K, V> { WeakEvictableEntry( ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next)1573 WeakEvictableEntry( 1574 ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1575 super(queue, key, hash, next); 1576 } 1577 1578 // The code below is exactly the same for each evictable entry type. 1579 1580 // Guarded By Segment.this 1581 ReferenceEntry<K, V> nextEvictable = nullEntry(); 1582 1583 @Override getNextEvictable()1584 public ReferenceEntry<K, V> getNextEvictable() { 1585 return nextEvictable; 1586 } 1587 1588 @Override setNextEvictable(ReferenceEntry<K, V> next)1589 public void setNextEvictable(ReferenceEntry<K, V> next) { 1590 this.nextEvictable = next; 1591 } 1592 1593 // Guarded By Segment.this 1594 ReferenceEntry<K, V> previousEvictable = nullEntry(); 1595 1596 @Override getPreviousEvictable()1597 public ReferenceEntry<K, V> getPreviousEvictable() { 1598 return previousEvictable; 1599 } 1600 1601 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1602 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1603 this.previousEvictable = previous; 1604 } 1605 } 1606 1607 static final class WeakExpirableEvictableEntry<K, V> 1608 extends WeakEntry<K, V> implements ReferenceEntry<K, V> { WeakExpirableEvictableEntry( ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next)1609 WeakExpirableEvictableEntry( 1610 ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1611 super(queue, key, hash, next); 1612 } 1613 1614 // The code below is exactly the same for each expirable entry type. 
1615 1616 volatile long time = Long.MAX_VALUE; 1617 1618 @Override getExpirationTime()1619 public long getExpirationTime() { 1620 return time; 1621 } 1622 1623 @Override setExpirationTime(long time)1624 public void setExpirationTime(long time) { 1625 this.time = time; 1626 } 1627 1628 // Guarded By Segment.this 1629 ReferenceEntry<K, V> nextExpirable = nullEntry(); 1630 1631 @Override getNextExpirable()1632 public ReferenceEntry<K, V> getNextExpirable() { 1633 return nextExpirable; 1634 } 1635 1636 @Override setNextExpirable(ReferenceEntry<K, V> next)1637 public void setNextExpirable(ReferenceEntry<K, V> next) { 1638 this.nextExpirable = next; 1639 } 1640 1641 // Guarded By Segment.this 1642 ReferenceEntry<K, V> previousExpirable = nullEntry(); 1643 1644 @Override getPreviousExpirable()1645 public ReferenceEntry<K, V> getPreviousExpirable() { 1646 return previousExpirable; 1647 } 1648 1649 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1650 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1651 this.previousExpirable = previous; 1652 } 1653 1654 // The code below is exactly the same for each evictable entry type. 1655 1656 // Guarded By Segment.this 1657 ReferenceEntry<K, V> nextEvictable = nullEntry(); 1658 1659 @Override getNextEvictable()1660 public ReferenceEntry<K, V> getNextEvictable() { 1661 return nextEvictable; 1662 } 1663 1664 @Override setNextEvictable(ReferenceEntry<K, V> next)1665 public void setNextEvictable(ReferenceEntry<K, V> next) { 1666 this.nextEvictable = next; 1667 } 1668 1669 // Guarded By Segment.this 1670 ReferenceEntry<K, V> previousEvictable = nullEntry(); 1671 1672 @Override getPreviousEvictable()1673 public ReferenceEntry<K, V> getPreviousEvictable() { 1674 return previousEvictable; 1675 } 1676 1677 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1678 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1679 this.previousEvictable = previous; 1680 } 1681 } 1682 1683 /** 1684 * References a weak value. 1685 */ 1686 static final class WeakValueReference<K, V> 1687 extends WeakReference<V> implements ValueReference<K, V> { 1688 final ReferenceEntry<K, V> entry; 1689 WeakValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry)1690 WeakValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry) { 1691 super(referent, queue); 1692 this.entry = entry; 1693 } 1694 1695 @Override getEntry()1696 public ReferenceEntry<K, V> getEntry() { 1697 return entry; 1698 } 1699 1700 @Override clear(ValueReference<K, V> newValue)1701 public void clear(ValueReference<K, V> newValue) { 1702 clear(); 1703 } 1704 1705 @Override copyFor( ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry)1706 public ValueReference<K, V> copyFor( 1707 ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) { 1708 return new WeakValueReference<K, V>(queue, value, entry); 1709 } 1710 1711 @Override isComputingReference()1712 public boolean isComputingReference() { 1713 return false; 1714 } 1715 1716 @Override waitForValue()1717 public V waitForValue() { 1718 return get(); 1719 } 1720 } 1721 1722 /** 1723 * References a soft value. 
1724 */ 1725 static final class SoftValueReference<K, V> 1726 extends SoftReference<V> implements ValueReference<K, V> { 1727 final ReferenceEntry<K, V> entry; 1728 SoftValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry)1729 SoftValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry) { 1730 super(referent, queue); 1731 this.entry = entry; 1732 } 1733 1734 @Override getEntry()1735 public ReferenceEntry<K, V> getEntry() { 1736 return entry; 1737 } 1738 1739 @Override clear(ValueReference<K, V> newValue)1740 public void clear(ValueReference<K, V> newValue) { 1741 clear(); 1742 } 1743 1744 @Override copyFor( ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry)1745 public ValueReference<K, V> copyFor( 1746 ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) { 1747 return new SoftValueReference<K, V>(queue, value, entry); 1748 } 1749 1750 @Override isComputingReference()1751 public boolean isComputingReference() { 1752 return false; 1753 } 1754 1755 @Override waitForValue()1756 public V waitForValue() { 1757 return get(); 1758 } 1759 } 1760 1761 /** 1762 * References a strong value. 1763 */ 1764 static final class StrongValueReference<K, V> implements ValueReference<K, V> { 1765 final V referent; 1766 StrongValueReference(V referent)1767 StrongValueReference(V referent) { 1768 this.referent = referent; 1769 } 1770 1771 @Override get()1772 public V get() { 1773 return referent; 1774 } 1775 1776 @Override getEntry()1777 public ReferenceEntry<K, V> getEntry() { 1778 return null; 1779 } 1780 1781 @Override copyFor( ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry)1782 public ValueReference<K, V> copyFor( 1783 ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) { 1784 return this; 1785 } 1786 1787 @Override isComputingReference()1788 public boolean isComputingReference() { 1789 return false; 1790 } 1791 1792 @Override waitForValue()1793 public V waitForValue() { 1794 return get(); 1795 } 1796 1797 @Override clear(ValueReference<K, V> newValue)1798 public void clear(ValueReference<K, V> newValue) {} 1799 } 1800 1801 /** 1802 * Applies a supplemental hash function to a given hash code, which defends against poor quality 1803 * hash functions. This is critical when the concurrent hash map uses power-of-two length hash 1804 * tables, that otherwise encounter collisions for hash codes that do not differ in lower or 1805 * upper bits. 1806 * 1807 * @param h hash code 1808 */ rehash(int h)1809 static int rehash(int h) { 1810 // Spread bits to regularize both segment and index locations, 1811 // using variant of single-word Wang/Jenkins hash. 1812 // TODO(kevinb): use Hashing/move this to Hashing? 1813 h += (h << 15) ^ 0xffffcd7d; 1814 h ^= (h >>> 10); 1815 h += (h << 3); 1816 h ^= (h >>> 6); 1817 h += (h << 2) + (h << 14); 1818 return h ^ (h >>> 16); 1819 } 1820 1821 /** 1822 * This method is a convenience for testing. Code should call {@link Segment#newEntry} directly. 1823 */ 1824 // Guarded By Segment.this 1825 @VisibleForTesting newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next)1826 ReferenceEntry<K, V> newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1827 return segmentFor(hash).newEntry(key, hash, next); 1828 } 1829 1830 /** 1831 * This method is a convenience for testing. Code should call {@link Segment#copyEntry} directly. 
1832 */ 1833 // Guarded By Segment.this 1834 @VisibleForTesting copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext)1835 ReferenceEntry<K, V> copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 1836 int hash = original.getHash(); 1837 return segmentFor(hash).copyEntry(original, newNext); 1838 } 1839 1840 /** 1841 * This method is a convenience for testing. Code should call {@link Segment#setValue} instead. 1842 */ 1843 // Guarded By Segment.this 1844 @VisibleForTesting newValueReference(ReferenceEntry<K, V> entry, V value)1845 ValueReference<K, V> newValueReference(ReferenceEntry<K, V> entry, V value) { 1846 int hash = entry.getHash(); 1847 return valueStrength.referenceValue(segmentFor(hash), entry, value); 1848 } 1849 hash(Object key)1850 int hash(Object key) { 1851 int h = keyEquivalence.hash(key); 1852 return rehash(h); 1853 } 1854 reclaimValue(ValueReference<K, V> valueReference)1855 void reclaimValue(ValueReference<K, V> valueReference) { 1856 ReferenceEntry<K, V> entry = valueReference.getEntry(); 1857 int hash = entry.getHash(); 1858 segmentFor(hash).reclaimValue(entry.getKey(), hash, valueReference); 1859 } 1860 reclaimKey(ReferenceEntry<K, V> entry)1861 void reclaimKey(ReferenceEntry<K, V> entry) { 1862 int hash = entry.getHash(); 1863 segmentFor(hash).reclaimKey(entry, hash); 1864 } 1865 1866 /** 1867 * This method is a convenience for testing. Code should call {@link Segment#getLiveValue} 1868 * instead. 1869 */ 1870 @VisibleForTesting isLive(ReferenceEntry<K, V> entry)1871 boolean isLive(ReferenceEntry<K, V> entry) { 1872 return segmentFor(entry.getHash()).getLiveValue(entry) != null; 1873 } 1874 1875 /** 1876 * Returns the segment that should be used for a key with the given hash. 1877 * 1878 * @param hash the hash code for the key 1879 * @return the segment 1880 */ segmentFor(int hash)1881 Segment<K, V> segmentFor(int hash) { 1882 // TODO(fry): Lazily create segments? 1883 return segments[(hash >>> segmentShift) & segmentMask]; 1884 } 1885 createSegment(int initialCapacity, int maxSegmentSize)1886 Segment<K, V> createSegment(int initialCapacity, int maxSegmentSize) { 1887 return new Segment<K, V>(this, initialCapacity, maxSegmentSize); 1888 } 1889 1890 /** 1891 * Gets the value from an entry. Returns {@code null} if the entry is invalid, 1892 * partially-collected, computing, or expired. Unlike {@link Segment#getLiveValue} this method 1893 * does not attempt to clean up stale entries. 1894 */ getLiveValue(ReferenceEntry<K, V> entry)1895 V getLiveValue(ReferenceEntry<K, V> entry) { 1896 if (entry.getKey() == null) { 1897 return null; 1898 } 1899 V value = entry.getValueReference().get(); 1900 if (value == null) { 1901 return null; 1902 } 1903 1904 if (expires() && isExpired(entry)) { 1905 return null; 1906 } 1907 return value; 1908 } 1909 1910 // expiration 1911 1912 /** 1913 * Returns {@code true} if the entry has expired. 1914 */ isExpired(ReferenceEntry<K, V> entry)1915 boolean isExpired(ReferenceEntry<K, V> entry) { 1916 return isExpired(entry, ticker.read()); 1917 } 1918 1919 /** 1920 * Returns {@code true} if the entry has expired. 
   */
  boolean isExpired(ReferenceEntry<K, V> entry, long now) {
    // if the expiration time had overflowed, this "undoes" the overflow
    return now - entry.getExpirationTime() > 0;
  }

  // Guarded By Segment.this
  static <K, V> void connectExpirables(ReferenceEntry<K, V> previous, ReferenceEntry<K, V> next) {
    previous.setNextExpirable(next);
    next.setPreviousExpirable(previous);
  }

  // Guarded By Segment.this
  static <K, V> void nullifyExpirable(ReferenceEntry<K, V> nulled) {
    ReferenceEntry<K, V> nullEntry = nullEntry();
    nulled.setNextExpirable(nullEntry);
    nulled.setPreviousExpirable(nullEntry);
  }

  // eviction

  /**
   * Notifies listeners that an entry has been automatically removed due to expiration, eviction,
   * or eligibility for garbage collection. This should be called every time expireEntries or
   * evictEntry is called (once the lock is released).
   */
  void processPendingNotifications() {
    RemovalNotification<K, V> notification;
    while ((notification = removalNotificationQueue.poll()) != null) {
      try {
        removalListener.onRemoval(notification);
      } catch (Exception e) {
        logger.log(Level.WARNING, "Exception thrown by removal listener", e);
      }
    }
  }

  /** Links the evictables together. */
  // Guarded By Segment.this
  static <K, V> void connectEvictables(ReferenceEntry<K, V> previous, ReferenceEntry<K, V> next) {
    previous.setNextEvictable(next);
    next.setPreviousEvictable(previous);
  }

  // Guarded By Segment.this
  static <K, V> void nullifyEvictable(ReferenceEntry<K, V> nulled) {
    ReferenceEntry<K, V> nullEntry = nullEntry();
    nulled.setNextEvictable(nullEntry);
    nulled.setPreviousEvictable(nullEntry);
  }

  @SuppressWarnings("unchecked")
  final Segment<K, V>[] newSegmentArray(int ssize) {
    return new Segment[ssize];
  }

  // Inner Classes

  /**
   * Segments are specialized versions of hash tables. This subclass inherits from ReentrantLock
   * opportunistically, just to simplify some locking and avoid separate construction.
   */
  @SuppressWarnings("serial") // This class is never serialized.
  static class Segment<K, V> extends ReentrantLock {

    /*
     * TODO(fry): Consider copying variables (like evictsBySize) from outer class into this class.
     * It will require more memory but will reduce indirection.
     */

    /*
     * Segments maintain a table of entry lists that are ALWAYS kept in a consistent state, so can
     * be read without locking. Next fields of nodes are immutable (final). All list additions are
     * performed at the front of each bin. This makes it easy to check changes, and also fast to
     * traverse. When nodes would otherwise be changed, new nodes are created to replace them. This
     * works well for hash tables since the bin lists tend to be short. (The average length is less
     * than two.)
1998 * 1999 * Read operations can thus proceed without locking, but rely on selected uses of volatiles to 2000 * ensure that completed write operations performed by other threads are noticed. For most 2001 * purposes, the "count" field, tracking the number of elements, serves as that volatile 2002 * variable ensuring visibility. This is convenient because this field needs to be read in many 2003 * read operations anyway: 2004 * 2005 * - All (unsynchronized) read operations must first read the "count" field, and should not 2006 * look at table entries if it is 0. 2007 * 2008 * - All (synchronized) write operations should write to the "count" field after structurally 2009 * changing any bin. The operations must not take any action that could even momentarily 2010 * cause a concurrent read operation to see inconsistent data. This is made easier by the 2011 * nature of the read operations in Map. For example, no operation can reveal that the table 2012 * has grown but the threshold has not yet been updated, so there are no atomicity requirements 2013 * for this with respect to reads. 2014 * 2015 * As a guide, all critical volatile reads and writes to the count field are marked in code 2016 * comments. 2017 */ 2018 2019 final MapMakerInternalMap<K, V> map; 2020 2021 /** 2022 * The number of live elements in this segment's region. This does not include unset elements 2023 * which are awaiting cleanup. 2024 */ 2025 volatile int count; 2026 2027 /** 2028 * Number of updates that alter the size of the table. This is used during bulk-read methods to 2029 * make sure they see a consistent snapshot: If modCounts change during a traversal of segments 2030 * computing size or checking containsValue, then we might have an inconsistent view of state 2031 * so (usually) must retry. 2032 */ 2033 int modCount; 2034 2035 /** 2036 * The table is expanded when its size exceeds this threshold. (The value of this field is 2037 * always {@code (int) (capacity * 0.75)}.) 2038 */ 2039 int threshold; 2040 2041 /** 2042 * The per-segment table. 2043 */ 2044 volatile AtomicReferenceArray<ReferenceEntry<K, V>> table; 2045 2046 /** 2047 * The maximum size of this map. MapMaker.UNSET_INT if there is no maximum. 2048 */ 2049 final int maxSegmentSize; 2050 2051 /** 2052 * The key reference queue contains entries whose keys have been garbage collected, and which 2053 * need to be cleaned up internally. 2054 */ 2055 final ReferenceQueue<K> keyReferenceQueue; 2056 2057 /** 2058 * The value reference queue contains value references whose values have been garbage collected, 2059 * and which need to be cleaned up internally. 2060 */ 2061 final ReferenceQueue<V> valueReferenceQueue; 2062 2063 /** 2064 * The recency queue is used to record which entries were accessed for updating the eviction 2065 * list's ordering. It is drained as a batch operation when either the DRAIN_THRESHOLD is 2066 * crossed or a write occurs on the segment. 2067 */ 2068 final Queue<ReferenceEntry<K, V>> recencyQueue; 2069 2070 /** 2071 * A counter of the number of reads since the last write, used to drain queues on a small 2072 * fraction of read operations. 2073 */ 2074 final AtomicInteger readCount = new AtomicInteger(); 2075 2076 /** 2077 * A queue of elements currently in the map, ordered by access time. Elements are added to the 2078 * tail of the queue on access/write. 
2079 */ 2080 @GuardedBy("Segment.this") 2081 final Queue<ReferenceEntry<K, V>> evictionQueue; 2082 2083 /** 2084 * A queue of elements currently in the map, ordered by expiration time (either access or write 2085 * time). Elements are added to the tail of the queue on access/write. 2086 */ 2087 @GuardedBy("Segment.this") 2088 final Queue<ReferenceEntry<K, V>> expirationQueue; 2089 Segment(MapMakerInternalMap<K, V> map, int initialCapacity, int maxSegmentSize)2090 Segment(MapMakerInternalMap<K, V> map, int initialCapacity, int maxSegmentSize) { 2091 this.map = map; 2092 this.maxSegmentSize = maxSegmentSize; 2093 initTable(newEntryArray(initialCapacity)); 2094 2095 keyReferenceQueue = map.usesKeyReferences() 2096 ? new ReferenceQueue<K>() : null; 2097 2098 valueReferenceQueue = map.usesValueReferences() 2099 ? new ReferenceQueue<V>() : null; 2100 2101 recencyQueue = (map.evictsBySize() || map.expiresAfterAccess()) 2102 ? new ConcurrentLinkedQueue<ReferenceEntry<K, V>>() 2103 : MapMakerInternalMap.<ReferenceEntry<K, V>>discardingQueue(); 2104 2105 evictionQueue = map.evictsBySize() 2106 ? new EvictionQueue<K, V>() 2107 : MapMakerInternalMap.<ReferenceEntry<K, V>>discardingQueue(); 2108 2109 expirationQueue = map.expires() 2110 ? new ExpirationQueue<K, V>() 2111 : MapMakerInternalMap.<ReferenceEntry<K, V>>discardingQueue(); 2112 } 2113 newEntryArray(int size)2114 AtomicReferenceArray<ReferenceEntry<K, V>> newEntryArray(int size) { 2115 return new AtomicReferenceArray<ReferenceEntry<K, V>>(size); 2116 } 2117 initTable(AtomicReferenceArray<ReferenceEntry<K, V>> newTable)2118 void initTable(AtomicReferenceArray<ReferenceEntry<K, V>> newTable) { 2119 this.threshold = newTable.length() * 3 / 4; // 0.75 2120 if (this.threshold == maxSegmentSize) { 2121 // prevent spurious expansion before eviction 2122 this.threshold++; 2123 } 2124 this.table = newTable; 2125 } 2126 2127 @GuardedBy("Segment.this") newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next)2128 ReferenceEntry<K, V> newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 2129 return map.entryFactory.newEntry(this, key, hash, next); 2130 } 2131 2132 /** 2133 * Copies {@code original} into a new entry chained to {@code newNext}. Returns the new entry, 2134 * or {@code null} if {@code original} was already garbage collected. 2135 */ 2136 @GuardedBy("Segment.this") copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext)2137 ReferenceEntry<K, V> copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 2138 if (original.getKey() == null) { 2139 // key collected 2140 return null; 2141 } 2142 2143 ValueReference<K, V> valueReference = original.getValueReference(); 2144 V value = valueReference.get(); 2145 if ((value == null) && !valueReference.isComputingReference()) { 2146 // value collected 2147 return null; 2148 } 2149 2150 ReferenceEntry<K, V> newEntry = map.entryFactory.copyEntry(this, original, newNext); 2151 newEntry.setValueReference(valueReference.copyFor(this.valueReferenceQueue, value, newEntry)); 2152 return newEntry; 2153 } 2154 2155 /** 2156 * Sets a new value of an entry. Adds newly created entries at the end of the expiration queue. 
2157 */ 2158 @GuardedBy("Segment.this") setValue(ReferenceEntry<K, V> entry, V value)2159 void setValue(ReferenceEntry<K, V> entry, V value) { 2160 ValueReference<K, V> valueReference = map.valueStrength.referenceValue(this, entry, value); 2161 entry.setValueReference(valueReference); 2162 recordWrite(entry); 2163 } 2164 2165 // reference queues, for garbage collection cleanup 2166 2167 /** 2168 * Cleanup collected entries when the lock is available. 2169 */ tryDrainReferenceQueues()2170 void tryDrainReferenceQueues() { 2171 if (tryLock()) { 2172 try { 2173 drainReferenceQueues(); 2174 } finally { 2175 unlock(); 2176 } 2177 } 2178 } 2179 2180 /** 2181 * Drain the key and value reference queues, cleaning up internal entries containing garbage 2182 * collected keys or values. 2183 */ 2184 @GuardedBy("Segment.this") drainReferenceQueues()2185 void drainReferenceQueues() { 2186 if (map.usesKeyReferences()) { 2187 drainKeyReferenceQueue(); 2188 } 2189 if (map.usesValueReferences()) { 2190 drainValueReferenceQueue(); 2191 } 2192 } 2193 2194 @GuardedBy("Segment.this") drainKeyReferenceQueue()2195 void drainKeyReferenceQueue() { 2196 Reference<? extends K> ref; 2197 int i = 0; 2198 while ((ref = keyReferenceQueue.poll()) != null) { 2199 @SuppressWarnings("unchecked") 2200 ReferenceEntry<K, V> entry = (ReferenceEntry<K, V>) ref; 2201 map.reclaimKey(entry); 2202 if (++i == DRAIN_MAX) { 2203 break; 2204 } 2205 } 2206 } 2207 2208 @GuardedBy("Segment.this") drainValueReferenceQueue()2209 void drainValueReferenceQueue() { 2210 Reference<? extends V> ref; 2211 int i = 0; 2212 while ((ref = valueReferenceQueue.poll()) != null) { 2213 @SuppressWarnings("unchecked") 2214 ValueReference<K, V> valueReference = (ValueReference<K, V>) ref; 2215 map.reclaimValue(valueReference); 2216 if (++i == DRAIN_MAX) { 2217 break; 2218 } 2219 } 2220 } 2221 2222 /** 2223 * Clears all entries from the key and value reference queues. 2224 */ clearReferenceQueues()2225 void clearReferenceQueues() { 2226 if (map.usesKeyReferences()) { 2227 clearKeyReferenceQueue(); 2228 } 2229 if (map.usesValueReferences()) { 2230 clearValueReferenceQueue(); 2231 } 2232 } 2233 clearKeyReferenceQueue()2234 void clearKeyReferenceQueue() { 2235 while (keyReferenceQueue.poll() != null) {} 2236 } 2237 clearValueReferenceQueue()2238 void clearValueReferenceQueue() { 2239 while (valueReferenceQueue.poll() != null) {} 2240 } 2241 2242 // recency queue, shared by expiration and eviction 2243 2244 /** 2245 * Records the relative order in which this read was performed by adding {@code entry} to the 2246 * recency queue. At write-time, or when the queue is full past the threshold, the queue will 2247 * be drained and the entries therein processed. 2248 * 2249 * <p>Note: locked reads should use {@link #recordLockedRead}. 2250 */ recordRead(ReferenceEntry<K, V> entry)2251 void recordRead(ReferenceEntry<K, V> entry) { 2252 if (map.expiresAfterAccess()) { 2253 recordExpirationTime(entry, map.expireAfterAccessNanos); 2254 } 2255 recencyQueue.add(entry); 2256 } 2257 2258 /** 2259 * Updates the eviction metadata that {@code entry} was just read. This currently amounts to 2260 * adding {@code entry} to relevant eviction lists. 2261 * 2262 * <p>Note: this method should only be called under lock, as it directly manipulates the 2263 * eviction queues. Unlocked reads should use {@link #recordRead}. 
2264 */ 2265 @GuardedBy("Segment.this") recordLockedRead(ReferenceEntry<K, V> entry)2266 void recordLockedRead(ReferenceEntry<K, V> entry) { 2267 evictionQueue.add(entry); 2268 if (map.expiresAfterAccess()) { 2269 recordExpirationTime(entry, map.expireAfterAccessNanos); 2270 expirationQueue.add(entry); 2271 } 2272 } 2273 2274 /** 2275 * Updates eviction metadata that {@code entry} was just written. This currently amounts to 2276 * adding {@code entry} to relevant eviction lists. 2277 */ 2278 @GuardedBy("Segment.this") recordWrite(ReferenceEntry<K, V> entry)2279 void recordWrite(ReferenceEntry<K, V> entry) { 2280 // we are already under lock, so drain the recency queue immediately 2281 drainRecencyQueue(); 2282 evictionQueue.add(entry); 2283 if (map.expires()) { 2284 // currently MapMaker ensures that expireAfterWrite and 2285 // expireAfterAccess are mutually exclusive 2286 long expiration = map.expiresAfterAccess() 2287 ? map.expireAfterAccessNanos 2288 : map.expireAfterWriteNanos; 2289 recordExpirationTime(entry, expiration); 2290 expirationQueue.add(entry); 2291 } 2292 } 2293 2294 /** 2295 * Drains the recency queue, updating eviction metadata that the entries therein were read in 2296 * the specified relative order. This currently amounts to adding them to relevant eviction 2297 * lists (accounting for the fact that they could have been removed from the map since being 2298 * added to the recency queue). 2299 */ 2300 @GuardedBy("Segment.this") drainRecencyQueue()2301 void drainRecencyQueue() { 2302 ReferenceEntry<K, V> e; 2303 while ((e = recencyQueue.poll()) != null) { 2304 // An entry may be in the recency queue despite it being removed from 2305 // the map . This can occur when the entry was concurrently read while a 2306 // writer is removing it from the segment or after a clear has removed 2307 // all of the segment's entries. 2308 if (evictionQueue.contains(e)) { 2309 evictionQueue.add(e); 2310 } 2311 if (map.expiresAfterAccess() && expirationQueue.contains(e)) { 2312 expirationQueue.add(e); 2313 } 2314 } 2315 } 2316 2317 // expiration 2318 recordExpirationTime(ReferenceEntry<K, V> entry, long expirationNanos)2319 void recordExpirationTime(ReferenceEntry<K, V> entry, long expirationNanos) { 2320 // might overflow, but that's okay (see isExpired()) 2321 entry.setExpirationTime(map.ticker.read() + expirationNanos); 2322 } 2323 2324 /** 2325 * Cleanup expired entries when the lock is available. 2326 */ tryExpireEntries()2327 void tryExpireEntries() { 2328 if (tryLock()) { 2329 try { 2330 expireEntries(); 2331 } finally { 2332 unlock(); 2333 // don't call postWriteCleanup as we're in a read 2334 } 2335 } 2336 } 2337 2338 @GuardedBy("Segment.this") expireEntries()2339 void expireEntries() { 2340 drainRecencyQueue(); 2341 2342 if (expirationQueue.isEmpty()) { 2343 // There's no point in calling nanoTime() if we have no entries to 2344 // expire. 
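        // (map.ticker.read() typically delegates to System.nanoTime() with MapMaker's default
        // ticker, so this early return keeps no-op cleanup passes cheap.)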
2345 return; 2346 } 2347 long now = map.ticker.read(); 2348 ReferenceEntry<K, V> e; 2349 while ((e = expirationQueue.peek()) != null && map.isExpired(e, now)) { 2350 if (!removeEntry(e, e.getHash(), RemovalCause.EXPIRED)) { 2351 throw new AssertionError(); 2352 } 2353 } 2354 } 2355 2356 // eviction 2357 enqueueNotification(ReferenceEntry<K, V> entry, RemovalCause cause)2358 void enqueueNotification(ReferenceEntry<K, V> entry, RemovalCause cause) { 2359 enqueueNotification(entry.getKey(), entry.getHash(), entry.getValueReference().get(), cause); 2360 } 2361 enqueueNotification(@ullable K key, int hash, @Nullable V value, RemovalCause cause)2362 void enqueueNotification(@Nullable K key, int hash, @Nullable V value, RemovalCause cause) { 2363 if (map.removalNotificationQueue != DISCARDING_QUEUE) { 2364 RemovalNotification<K, V> notification = new RemovalNotification<K, V>(key, value, cause); 2365 map.removalNotificationQueue.offer(notification); 2366 } 2367 } 2368 2369 /** 2370 * Performs eviction if the segment is full. This should only be called prior to adding a new 2371 * entry and increasing {@code count}. 2372 * 2373 * @return {@code true} if eviction occurred 2374 */ 2375 @GuardedBy("Segment.this") evictEntries()2376 boolean evictEntries() { 2377 if (map.evictsBySize() && count >= maxSegmentSize) { 2378 drainRecencyQueue(); 2379 2380 ReferenceEntry<K, V> e = evictionQueue.remove(); 2381 if (!removeEntry(e, e.getHash(), RemovalCause.SIZE)) { 2382 throw new AssertionError(); 2383 } 2384 return true; 2385 } 2386 return false; 2387 } 2388 2389 /** 2390 * Returns first entry of bin for given hash. 2391 */ getFirst(int hash)2392 ReferenceEntry<K, V> getFirst(int hash) { 2393 // read this volatile field only once 2394 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2395 return table.get(hash & (table.length() - 1)); 2396 } 2397 2398 // Specialized implementations of map methods 2399 getEntry(Object key, int hash)2400 ReferenceEntry<K, V> getEntry(Object key, int hash) { 2401 if (count != 0) { // read-volatile 2402 for (ReferenceEntry<K, V> e = getFirst(hash); e != null; e = e.getNext()) { 2403 if (e.getHash() != hash) { 2404 continue; 2405 } 2406 2407 K entryKey = e.getKey(); 2408 if (entryKey == null) { 2409 tryDrainReferenceQueues(); 2410 continue; 2411 } 2412 2413 if (map.keyEquivalence.equivalent(key, entryKey)) { 2414 return e; 2415 } 2416 } 2417 } 2418 2419 return null; 2420 } 2421 getLiveEntry(Object key, int hash)2422 ReferenceEntry<K, V> getLiveEntry(Object key, int hash) { 2423 ReferenceEntry<K, V> e = getEntry(key, hash); 2424 if (e == null) { 2425 return null; 2426 } else if (map.expires() && map.isExpired(e)) { 2427 tryExpireEntries(); 2428 return null; 2429 } 2430 return e; 2431 } 2432 get(Object key, int hash)2433 V get(Object key, int hash) { 2434 try { 2435 ReferenceEntry<K, V> e = getLiveEntry(key, hash); 2436 if (e == null) { 2437 return null; 2438 } 2439 2440 V value = e.getValueReference().get(); 2441 if (value != null) { 2442 recordRead(e); 2443 } else { 2444 tryDrainReferenceQueues(); 2445 } 2446 return value; 2447 } finally { 2448 postReadCleanup(); 2449 } 2450 } 2451 containsKey(Object key, int hash)2452 boolean containsKey(Object key, int hash) { 2453 try { 2454 if (count != 0) { // read-volatile 2455 ReferenceEntry<K, V> e = getLiveEntry(key, hash); 2456 if (e == null) { 2457 return false; 2458 } 2459 return e.getValueReference().get() != null; 2460 } 2461 2462 return false; 2463 } finally { 2464 postReadCleanup(); 2465 } 2466 } 2467 2468 /** 2469 * 
This method is a convenience for testing. Code should call {@link 2470 * MapMakerInternalMap#containsValue} directly. 2471 */ 2472 @VisibleForTesting containsValue(Object value)2473 boolean containsValue(Object value) { 2474 try { 2475 if (count != 0) { // read-volatile 2476 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2477 int length = table.length(); 2478 for (int i = 0; i < length; ++i) { 2479 for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) { 2480 V entryValue = getLiveValue(e); 2481 if (entryValue == null) { 2482 continue; 2483 } 2484 if (map.valueEquivalence.equivalent(value, entryValue)) { 2485 return true; 2486 } 2487 } 2488 } 2489 } 2490 2491 return false; 2492 } finally { 2493 postReadCleanup(); 2494 } 2495 } 2496 put(K key, int hash, V value, boolean onlyIfAbsent)2497 V put(K key, int hash, V value, boolean onlyIfAbsent) { 2498 lock(); 2499 try { 2500 preWriteCleanup(); 2501 2502 int newCount = this.count + 1; 2503 if (newCount > this.threshold) { // ensure capacity 2504 expand(); 2505 newCount = this.count + 1; 2506 } 2507 2508 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2509 int index = hash & (table.length() - 1); 2510 ReferenceEntry<K, V> first = table.get(index); 2511 2512 // Look for an existing entry. 2513 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2514 K entryKey = e.getKey(); 2515 if (e.getHash() == hash && entryKey != null 2516 && map.keyEquivalence.equivalent(key, entryKey)) { 2517 // We found an existing entry. 2518 2519 ValueReference<K, V> valueReference = e.getValueReference(); 2520 V entryValue = valueReference.get(); 2521 2522 if (entryValue == null) { 2523 ++modCount; 2524 setValue(e, value); 2525 if (!valueReference.isComputingReference()) { 2526 enqueueNotification(key, hash, entryValue, RemovalCause.COLLECTED); 2527 newCount = this.count; // count remains unchanged 2528 } else if (evictEntries()) { // evictEntries after setting new value 2529 newCount = this.count + 1; 2530 } 2531 this.count = newCount; // write-volatile 2532 return null; 2533 } else if (onlyIfAbsent) { 2534 // Mimic 2535 // "if (!map.containsKey(key)) ... 2536 // else return map.get(key); 2537 recordLockedRead(e); 2538 return entryValue; 2539 } else { 2540 // clobber existing entry, count remains unchanged 2541 ++modCount; 2542 enqueueNotification(key, hash, entryValue, RemovalCause.REPLACED); 2543 setValue(e, value); 2544 return entryValue; 2545 } 2546 } 2547 } 2548 2549 // Create a new entry. 2550 ++modCount; 2551 ReferenceEntry<K, V> newEntry = newEntry(key, hash, first); 2552 setValue(newEntry, value); 2553 table.set(index, newEntry); 2554 if (evictEntries()) { // evictEntries after setting new value 2555 newCount = this.count + 1; 2556 } 2557 this.count = newCount; // write-volatile 2558 return null; 2559 } finally { 2560 unlock(); 2561 postWriteCleanup(); 2562 } 2563 } 2564 2565 /** 2566 * Expands the table if possible. 2567 */ 2568 @GuardedBy("Segment.this") expand()2569 void expand() { 2570 AtomicReferenceArray<ReferenceEntry<K, V>> oldTable = table; 2571 int oldCapacity = oldTable.length(); 2572 if (oldCapacity >= MAXIMUM_CAPACITY) { 2573 return; 2574 } 2575 2576 /* 2577 * Reclassify nodes in each list to new Map. Because we are using power-of-two expansion, the 2578 * elements from each bin must either stay at same index, or move with a power of two offset. 2579 * We eliminate unnecessary node creation by catching cases where old nodes can be reused 2580 * because their next fields won't change. 
Statistically, at the default threshold, only 2581 * about one-sixth of them need cloning when a table doubles. The nodes they replace will be 2582 * garbage collectable as soon as they are no longer referenced by any reader thread that may 2583 * be in the midst of traversing table right now. 2584 */ 2585 2586 int newCount = count; 2587 AtomicReferenceArray<ReferenceEntry<K, V>> newTable = newEntryArray(oldCapacity << 1); 2588 threshold = newTable.length() * 3 / 4; 2589 int newMask = newTable.length() - 1; 2590 for (int oldIndex = 0; oldIndex < oldCapacity; ++oldIndex) { 2591 // We need to guarantee that any existing reads of old Map can 2592 // proceed. So we cannot yet null out each bin. 2593 ReferenceEntry<K, V> head = oldTable.get(oldIndex); 2594 2595 if (head != null) { 2596 ReferenceEntry<K, V> next = head.getNext(); 2597 int headIndex = head.getHash() & newMask; 2598 2599 // Single node on list 2600 if (next == null) { 2601 newTable.set(headIndex, head); 2602 } else { 2603 // Reuse the consecutive sequence of nodes with the same target 2604 // index from the end of the list. tail points to the first 2605 // entry in the reusable list. 2606 ReferenceEntry<K, V> tail = head; 2607 int tailIndex = headIndex; 2608 for (ReferenceEntry<K, V> e = next; e != null; e = e.getNext()) { 2609 int newIndex = e.getHash() & newMask; 2610 if (newIndex != tailIndex) { 2611 // The index changed. We'll need to copy the previous entry. 2612 tailIndex = newIndex; 2613 tail = e; 2614 } 2615 } 2616 newTable.set(tailIndex, tail); 2617 2618 // Clone nodes leading up to the tail. 2619 for (ReferenceEntry<K, V> e = head; e != tail; e = e.getNext()) { 2620 int newIndex = e.getHash() & newMask; 2621 ReferenceEntry<K, V> newNext = newTable.get(newIndex); 2622 ReferenceEntry<K, V> newFirst = copyEntry(e, newNext); 2623 if (newFirst != null) { 2624 newTable.set(newIndex, newFirst); 2625 } else { 2626 removeCollectedEntry(e); 2627 newCount--; 2628 } 2629 } 2630 } 2631 } 2632 } 2633 table = newTable; 2634 this.count = newCount; 2635 } 2636 replace(K key, int hash, V oldValue, V newValue)2637 boolean replace(K key, int hash, V oldValue, V newValue) { 2638 lock(); 2639 try { 2640 preWriteCleanup(); 2641 2642 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2643 int index = hash & (table.length() - 1); 2644 ReferenceEntry<K, V> first = table.get(index); 2645 2646 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2647 K entryKey = e.getKey(); 2648 if (e.getHash() == hash && entryKey != null 2649 && map.keyEquivalence.equivalent(key, entryKey)) { 2650 // If the value disappeared, this entry is partially collected, 2651 // and we should pretend like it doesn't exist. 2652 ValueReference<K, V> valueReference = e.getValueReference(); 2653 V entryValue = valueReference.get(); 2654 if (entryValue == null) { 2655 if (isCollected(valueReference)) { 2656 int newCount = this.count - 1; 2657 ++modCount; 2658 enqueueNotification(entryKey, hash, entryValue, RemovalCause.COLLECTED); 2659 ReferenceEntry<K, V> newFirst = removeFromChain(first, e); 2660 newCount = this.count - 1; 2661 table.set(index, newFirst); 2662 this.count = newCount; // write-volatile 2663 } 2664 return false; 2665 } 2666 2667 if (map.valueEquivalence.equivalent(oldValue, entryValue)) { 2668 ++modCount; 2669 enqueueNotification(key, hash, entryValue, RemovalCause.REPLACED); 2670 setValue(e, newValue); 2671 return true; 2672 } else { 2673 // Mimic 2674 // "if (map.containsKey(key) && map.get(key).equals(oldValue))..." 
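              // A failed comparison still counts as an access of this entry, so refresh the
              // eviction/expiration metadata under the lock just as the mimicked get(key) would.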
2675 recordLockedRead(e); 2676 return false; 2677 } 2678 } 2679 } 2680 2681 return false; 2682 } finally { 2683 unlock(); 2684 postWriteCleanup(); 2685 } 2686 } 2687 replace(K key, int hash, V newValue)2688 V replace(K key, int hash, V newValue) { 2689 lock(); 2690 try { 2691 preWriteCleanup(); 2692 2693 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2694 int index = hash & (table.length() - 1); 2695 ReferenceEntry<K, V> first = table.get(index); 2696 2697 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2698 K entryKey = e.getKey(); 2699 if (e.getHash() == hash && entryKey != null 2700 && map.keyEquivalence.equivalent(key, entryKey)) { 2701 // If the value disappeared, this entry is partially collected, 2702 // and we should pretend like it doesn't exist. 2703 ValueReference<K, V> valueReference = e.getValueReference(); 2704 V entryValue = valueReference.get(); 2705 if (entryValue == null) { 2706 if (isCollected(valueReference)) { 2707 int newCount = this.count - 1; 2708 ++modCount; 2709 enqueueNotification(entryKey, hash, entryValue, RemovalCause.COLLECTED); 2710 ReferenceEntry<K, V> newFirst = removeFromChain(first, e); 2711 newCount = this.count - 1; 2712 table.set(index, newFirst); 2713 this.count = newCount; // write-volatile 2714 } 2715 return null; 2716 } 2717 2718 ++modCount; 2719 enqueueNotification(key, hash, entryValue, RemovalCause.REPLACED); 2720 setValue(e, newValue); 2721 return entryValue; 2722 } 2723 } 2724 2725 return null; 2726 } finally { 2727 unlock(); 2728 postWriteCleanup(); 2729 } 2730 } 2731 remove(Object key, int hash)2732 V remove(Object key, int hash) { 2733 lock(); 2734 try { 2735 preWriteCleanup(); 2736 2737 int newCount = this.count - 1; 2738 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2739 int index = hash & (table.length() - 1); 2740 ReferenceEntry<K, V> first = table.get(index); 2741 2742 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2743 K entryKey = e.getKey(); 2744 if (e.getHash() == hash && entryKey != null 2745 && map.keyEquivalence.equivalent(key, entryKey)) { 2746 ValueReference<K, V> valueReference = e.getValueReference(); 2747 V entryValue = valueReference.get(); 2748 2749 RemovalCause cause; 2750 if (entryValue != null) { 2751 cause = RemovalCause.EXPLICIT; 2752 } else if (isCollected(valueReference)) { 2753 cause = RemovalCause.COLLECTED; 2754 } else { 2755 return null; 2756 } 2757 2758 ++modCount; 2759 enqueueNotification(entryKey, hash, entryValue, cause); 2760 ReferenceEntry<K, V> newFirst = removeFromChain(first, e); 2761 newCount = this.count - 1; 2762 table.set(index, newFirst); 2763 this.count = newCount; // write-volatile 2764 return entryValue; 2765 } 2766 } 2767 2768 return null; 2769 } finally { 2770 unlock(); 2771 postWriteCleanup(); 2772 } 2773 } 2774 remove(Object key, int hash, Object value)2775 boolean remove(Object key, int hash, Object value) { 2776 lock(); 2777 try { 2778 preWriteCleanup(); 2779 2780 int newCount = this.count - 1; 2781 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2782 int index = hash & (table.length() - 1); 2783 ReferenceEntry<K, V> first = table.get(index); 2784 2785 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2786 K entryKey = e.getKey(); 2787 if (e.getHash() == hash && entryKey != null 2788 && map.keyEquivalence.equivalent(key, entryKey)) { 2789 ValueReference<K, V> valueReference = e.getValueReference(); 2790 V entryValue = valueReference.get(); 2791 2792 RemovalCause cause; 2793 if 
(map.valueEquivalence.equivalent(value, entryValue)) { 2794 cause = RemovalCause.EXPLICIT; 2795 } else if (isCollected(valueReference)) { 2796 cause = RemovalCause.COLLECTED; 2797 } else { 2798 return false; 2799 } 2800 2801 ++modCount; 2802 enqueueNotification(entryKey, hash, entryValue, cause); 2803 ReferenceEntry<K, V> newFirst = removeFromChain(first, e); 2804 newCount = this.count - 1; 2805 table.set(index, newFirst); 2806 this.count = newCount; // write-volatile 2807 return (cause == RemovalCause.EXPLICIT); 2808 } 2809 } 2810 2811 return false; 2812 } finally { 2813 unlock(); 2814 postWriteCleanup(); 2815 } 2816 } 2817 clear()2818 void clear() { 2819 if (count != 0) { 2820 lock(); 2821 try { 2822 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2823 if (map.removalNotificationQueue != DISCARDING_QUEUE) { 2824 for (int i = 0; i < table.length(); ++i) { 2825 for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) { 2826 // Computing references aren't actually in the map yet. 2827 if (!e.getValueReference().isComputingReference()) { 2828 enqueueNotification(e, RemovalCause.EXPLICIT); 2829 } 2830 } 2831 } 2832 } 2833 for (int i = 0; i < table.length(); ++i) { 2834 table.set(i, null); 2835 } 2836 clearReferenceQueues(); 2837 evictionQueue.clear(); 2838 expirationQueue.clear(); 2839 readCount.set(0); 2840 2841 ++modCount; 2842 count = 0; // write-volatile 2843 } finally { 2844 unlock(); 2845 postWriteCleanup(); 2846 } 2847 } 2848 } 2849 2850 /** 2851 * Removes an entry from within a table. All entries following the removed node can stay, but 2852 * all preceding ones need to be cloned. 2853 * 2854 * <p>This method does not decrement count for the removed entry, but does decrement count for 2855 * all partially collected entries which are skipped. As such callers which are modifying count 2856 * must re-read it after calling removeFromChain. 2857 * 2858 * @param first the first entry of the table 2859 * @param entry the entry being removed from the table 2860 * @return the new first entry for the table 2861 */ 2862 @GuardedBy("Segment.this") removeFromChain(ReferenceEntry<K, V> first, ReferenceEntry<K, V> entry)2863 ReferenceEntry<K, V> removeFromChain(ReferenceEntry<K, V> first, ReferenceEntry<K, V> entry) { 2864 evictionQueue.remove(entry); 2865 expirationQueue.remove(entry); 2866 2867 int newCount = count; 2868 ReferenceEntry<K, V> newFirst = entry.getNext(); 2869 for (ReferenceEntry<K, V> e = first; e != entry; e = e.getNext()) { 2870 ReferenceEntry<K, V> next = copyEntry(e, newFirst); 2871 if (next != null) { 2872 newFirst = next; 2873 } else { 2874 removeCollectedEntry(e); 2875 newCount--; 2876 } 2877 } 2878 this.count = newCount; 2879 return newFirst; 2880 } 2881 removeCollectedEntry(ReferenceEntry<K, V> entry)2882 void removeCollectedEntry(ReferenceEntry<K, V> entry) { 2883 enqueueNotification(entry, RemovalCause.COLLECTED); 2884 evictionQueue.remove(entry); 2885 expirationQueue.remove(entry); 2886 } 2887 2888 /** 2889 * Removes an entry whose key has been garbage collected. 
2890 */ reclaimKey(ReferenceEntry<K, V> entry, int hash)2891 boolean reclaimKey(ReferenceEntry<K, V> entry, int hash) { 2892 lock(); 2893 try { 2894 int newCount = count - 1; 2895 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2896 int index = hash & (table.length() - 1); 2897 ReferenceEntry<K, V> first = table.get(index); 2898 2899 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2900 if (e == entry) { 2901 ++modCount; 2902 enqueueNotification( 2903 e.getKey(), hash, e.getValueReference().get(), RemovalCause.COLLECTED); 2904 ReferenceEntry<K, V> newFirst = removeFromChain(first, e); 2905 newCount = this.count - 1; 2906 table.set(index, newFirst); 2907 this.count = newCount; // write-volatile 2908 return true; 2909 } 2910 } 2911 2912 return false; 2913 } finally { 2914 unlock(); 2915 postWriteCleanup(); 2916 } 2917 } 2918 2919 /** 2920 * Removes an entry whose value has been garbage collected. 2921 */ reclaimValue(K key, int hash, ValueReference<K, V> valueReference)2922 boolean reclaimValue(K key, int hash, ValueReference<K, V> valueReference) { 2923 lock(); 2924 try { 2925 int newCount = this.count - 1; 2926 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2927 int index = hash & (table.length() - 1); 2928 ReferenceEntry<K, V> first = table.get(index); 2929 2930 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2931 K entryKey = e.getKey(); 2932 if (e.getHash() == hash && entryKey != null 2933 && map.keyEquivalence.equivalent(key, entryKey)) { 2934 ValueReference<K, V> v = e.getValueReference(); 2935 if (v == valueReference) { 2936 ++modCount; 2937 enqueueNotification(key, hash, valueReference.get(), RemovalCause.COLLECTED); 2938 ReferenceEntry<K, V> newFirst = removeFromChain(first, e); 2939 newCount = this.count - 1; 2940 table.set(index, newFirst); 2941 this.count = newCount; // write-volatile 2942 return true; 2943 } 2944 return false; 2945 } 2946 } 2947 2948 return false; 2949 } finally { 2950 unlock(); 2951 if (!isHeldByCurrentThread()) { // don't cleanup inside of put 2952 postWriteCleanup(); 2953 } 2954 } 2955 } 2956 2957 /** 2958 * Clears a value that has not yet been set, and thus does not require count to be modified. 
2959 */ clearValue(K key, int hash, ValueReference<K, V> valueReference)2960 boolean clearValue(K key, int hash, ValueReference<K, V> valueReference) { 2961 lock(); 2962 try { 2963 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2964 int index = hash & (table.length() - 1); 2965 ReferenceEntry<K, V> first = table.get(index); 2966 2967 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2968 K entryKey = e.getKey(); 2969 if (e.getHash() == hash && entryKey != null 2970 && map.keyEquivalence.equivalent(key, entryKey)) { 2971 ValueReference<K, V> v = e.getValueReference(); 2972 if (v == valueReference) { 2973 ReferenceEntry<K, V> newFirst = removeFromChain(first, e); 2974 table.set(index, newFirst); 2975 return true; 2976 } 2977 return false; 2978 } 2979 } 2980 2981 return false; 2982 } finally { 2983 unlock(); 2984 postWriteCleanup(); 2985 } 2986 } 2987 2988 @GuardedBy("Segment.this") removeEntry(ReferenceEntry<K, V> entry, int hash, RemovalCause cause)2989 boolean removeEntry(ReferenceEntry<K, V> entry, int hash, RemovalCause cause) { 2990 int newCount = this.count - 1; 2991 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2992 int index = hash & (table.length() - 1); 2993 ReferenceEntry<K, V> first = table.get(index); 2994 2995 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2996 if (e == entry) { 2997 ++modCount; 2998 enqueueNotification(e.getKey(), hash, e.getValueReference().get(), cause); 2999 ReferenceEntry<K, V> newFirst = removeFromChain(first, e); 3000 newCount = this.count - 1; 3001 table.set(index, newFirst); 3002 this.count = newCount; // write-volatile 3003 return true; 3004 } 3005 } 3006 3007 return false; 3008 } 3009 3010 /** 3011 * Returns {@code true} if the value has been partially collected, meaning that the value is 3012 * null and it is not computing. 3013 */ isCollected(ValueReference<K, V> valueReference)3014 boolean isCollected(ValueReference<K, V> valueReference) { 3015 if (valueReference.isComputingReference()) { 3016 return false; 3017 } 3018 return (valueReference.get() == null); 3019 } 3020 3021 /** 3022 * Gets the value from an entry. Returns {@code null} if the entry is invalid, 3023 * partially-collected, computing, or expired. 3024 */ getLiveValue(ReferenceEntry<K, V> entry)3025 V getLiveValue(ReferenceEntry<K, V> entry) { 3026 if (entry.getKey() == null) { 3027 tryDrainReferenceQueues(); 3028 return null; 3029 } 3030 V value = entry.getValueReference().get(); 3031 if (value == null) { 3032 tryDrainReferenceQueues(); 3033 return null; 3034 } 3035 3036 if (map.expires() && map.isExpired(entry)) { 3037 tryExpireEntries(); 3038 return null; 3039 } 3040 return value; 3041 } 3042 3043 /** 3044 * Performs routine cleanup following a read. Normally cleanup happens during writes, or from 3045 * the cleanupExecutor. If cleanup is not observed after a sufficient number of reads, try 3046 * cleaning up from the read thread. 3047 */ postReadCleanup()3048 void postReadCleanup() { 3049 if ((readCount.incrementAndGet() & DRAIN_THRESHOLD) == 0) { 3050 runCleanup(); 3051 } 3052 } 3053 3054 /** 3055 * Performs routine cleanup prior to executing a write. This should be called every time a 3056 * write thread acquires the segment lock, immediately after acquiring the lock. 3057 * 3058 * <p>Post-condition: expireEntries has been run. 3059 */ 3060 @GuardedBy("Segment.this") preWriteCleanup()3061 void preWriteCleanup() { 3062 runLockedCleanup(); 3063 } 3064 3065 /** 3066 * Performs routine cleanup following a write. 
3067 */ postWriteCleanup()3068 void postWriteCleanup() { 3069 runUnlockedCleanup(); 3070 } 3071 runCleanup()3072 void runCleanup() { 3073 runLockedCleanup(); 3074 runUnlockedCleanup(); 3075 } 3076 runLockedCleanup()3077 void runLockedCleanup() { 3078 if (tryLock()) { 3079 try { 3080 drainReferenceQueues(); 3081 expireEntries(); // calls drainRecencyQueue 3082 readCount.set(0); 3083 } finally { 3084 unlock(); 3085 } 3086 } 3087 } 3088 runUnlockedCleanup()3089 void runUnlockedCleanup() { 3090 // locked cleanup may generate notifications we can send unlocked 3091 if (!isHeldByCurrentThread()) { 3092 map.processPendingNotifications(); 3093 } 3094 } 3095 3096 } 3097 3098 // Queues 3099 3100 /** 3101 * A custom queue for managing eviction order. Note that this is tightly integrated with {@code 3102 * ReferenceEntry}, upon which it relies to perform its linking. 3103 * 3104 * <p>Note that this entire implementation makes the assumption that all elements which are in 3105 * the map are also in this queue, and that all elements not in the queue are not in the map. 3106 * 3107 * <p>The benefits of creating our own queue are that (1) we can replace elements in the middle 3108 * of the queue as part of copyEvictableEntry, and (2) the contains method is highly optimized 3109 * for the current model. 3110 */ 3111 static final class EvictionQueue<K, V> extends AbstractQueue<ReferenceEntry<K, V>> { 3112 final ReferenceEntry<K, V> head = new AbstractReferenceEntry<K, V>() { 3113 3114 ReferenceEntry<K, V> nextEvictable = this; 3115 3116 @Override 3117 public ReferenceEntry<K, V> getNextEvictable() { 3118 return nextEvictable; 3119 } 3120 3121 @Override 3122 public void setNextEvictable(ReferenceEntry<K, V> next) { 3123 this.nextEvictable = next; 3124 } 3125 3126 ReferenceEntry<K, V> previousEvictable = this; 3127 3128 @Override 3129 public ReferenceEntry<K, V> getPreviousEvictable() { 3130 return previousEvictable; 3131 } 3132 3133 @Override 3134 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 3135 this.previousEvictable = previous; 3136 } 3137 }; 3138 3139 // implements Queue 3140 3141 @Override offer(ReferenceEntry<K, V> entry)3142 public boolean offer(ReferenceEntry<K, V> entry) { 3143 // unlink 3144 connectEvictables(entry.getPreviousEvictable(), entry.getNextEvictable()); 3145 3146 // add to tail 3147 connectEvictables(head.getPreviousEvictable(), entry); 3148 connectEvictables(entry, head); 3149 3150 return true; 3151 } 3152 3153 @Override peek()3154 public ReferenceEntry<K, V> peek() { 3155 ReferenceEntry<K, V> next = head.getNextEvictable(); 3156 return (next == head) ? 
          null : next;
    }

    @Override
    public ReferenceEntry<K, V> poll() {
      ReferenceEntry<K, V> next = head.getNextEvictable();
      if (next == head) {
        return null;
      }

      remove(next);
      return next;
    }

    @Override
    @SuppressWarnings("unchecked")
    public boolean remove(Object o) {
      ReferenceEntry<K, V> e = (ReferenceEntry) o;
      ReferenceEntry<K, V> previous = e.getPreviousEvictable();
      ReferenceEntry<K, V> next = e.getNextEvictable();
      connectEvictables(previous, next);
      nullifyEvictable(e);

      return next != NullEntry.INSTANCE;
    }

    @Override
    @SuppressWarnings("unchecked")
    public boolean contains(Object o) {
      ReferenceEntry<K, V> e = (ReferenceEntry) o;
      return e.getNextEvictable() != NullEntry.INSTANCE;
    }

    @Override
    public boolean isEmpty() {
      return head.getNextEvictable() == head;
    }

    @Override
    public int size() {
      int size = 0;
      for (ReferenceEntry<K, V> e = head.getNextEvictable(); e != head; e = e.getNextEvictable()) {
        size++;
      }
      return size;
    }

    @Override
    public void clear() {
      ReferenceEntry<K, V> e = head.getNextEvictable();
      while (e != head) {
        ReferenceEntry<K, V> next = e.getNextEvictable();
        nullifyEvictable(e);
        e = next;
      }

      head.setNextEvictable(head);
      head.setPreviousEvictable(head);
    }

    @Override
    public Iterator<ReferenceEntry<K, V>> iterator() {
      return new AbstractSequentialIterator<ReferenceEntry<K, V>>(peek()) {
        @Override
        protected ReferenceEntry<K, V> computeNext(ReferenceEntry<K, V> previous) {
          ReferenceEntry<K, V> next = previous.getNextEvictable();
          return (next == head) ? null : next;
        }
      };
    }
  }

  /**
   * A custom queue for managing expiration order. Note that this is tightly integrated with
   * {@code ReferenceEntry}, upon which it relies to perform its linking.
   *
   * <p>Note that this entire implementation makes the assumption that all elements which are in
   * the map are also in this queue, and that all elements not in the queue are not in the map.
   *
   * <p>The benefits of creating our own queue are that (1) we can replace elements in the middle
   * of the queue as part of copyEvictableEntry, and (2) the contains method is highly optimized
   * for the current model.
3238 */ 3239 static final class ExpirationQueue<K, V> extends AbstractQueue<ReferenceEntry<K, V>> { 3240 final ReferenceEntry<K, V> head = new AbstractReferenceEntry<K, V>() { 3241 3242 @Override 3243 public long getExpirationTime() { 3244 return Long.MAX_VALUE; 3245 } 3246 3247 @Override 3248 public void setExpirationTime(long time) {} 3249 3250 ReferenceEntry<K, V> nextExpirable = this; 3251 3252 @Override 3253 public ReferenceEntry<K, V> getNextExpirable() { 3254 return nextExpirable; 3255 } 3256 3257 @Override 3258 public void setNextExpirable(ReferenceEntry<K, V> next) { 3259 this.nextExpirable = next; 3260 } 3261 3262 ReferenceEntry<K, V> previousExpirable = this; 3263 3264 @Override 3265 public ReferenceEntry<K, V> getPreviousExpirable() { 3266 return previousExpirable; 3267 } 3268 3269 @Override 3270 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 3271 this.previousExpirable = previous; 3272 } 3273 }; 3274 3275 // implements Queue 3276 3277 @Override 3278 public boolean offer(ReferenceEntry<K, V> entry) { 3279 // unlink 3280 connectExpirables(entry.getPreviousExpirable(), entry.getNextExpirable()); 3281 3282 // add to tail 3283 connectExpirables(head.getPreviousExpirable(), entry); 3284 connectExpirables(entry, head); 3285 3286 return true; 3287 } 3288 3289 @Override 3290 public ReferenceEntry<K, V> peek() { 3291 ReferenceEntry<K, V> next = head.getNextExpirable(); 3292 return (next == head) ? null : next; 3293 } 3294 3295 @Override 3296 public ReferenceEntry<K, V> poll() { 3297 ReferenceEntry<K, V> next = head.getNextExpirable(); 3298 if (next == head) { 3299 return null; 3300 } 3301 3302 remove(next); 3303 return next; 3304 } 3305 3306 @Override 3307 @SuppressWarnings("unchecked") 3308 public boolean remove(Object o) { 3309 ReferenceEntry<K, V> e = (ReferenceEntry) o; 3310 ReferenceEntry<K, V> previous = e.getPreviousExpirable(); 3311 ReferenceEntry<K, V> next = e.getNextExpirable(); 3312 connectExpirables(previous, next); 3313 nullifyExpirable(e); 3314 3315 return next != NullEntry.INSTANCE; 3316 } 3317 3318 @Override 3319 @SuppressWarnings("unchecked") 3320 public boolean contains(Object o) { 3321 ReferenceEntry<K, V> e = (ReferenceEntry) o; 3322 return e.getNextExpirable() != NullEntry.INSTANCE; 3323 } 3324 3325 @Override 3326 public boolean isEmpty() { 3327 return head.getNextExpirable() == head; 3328 } 3329 3330 @Override 3331 public int size() { 3332 int size = 0; 3333 for (ReferenceEntry<K, V> e = head.getNextExpirable(); e != head; e = e.getNextExpirable()) { 3334 size++; 3335 } 3336 return size; 3337 } 3338 3339 @Override 3340 public void clear() { 3341 ReferenceEntry<K, V> e = head.getNextExpirable(); 3342 while (e != head) { 3343 ReferenceEntry<K, V> next = e.getNextExpirable(); 3344 nullifyExpirable(e); 3345 e = next; 3346 } 3347 3348 head.setNextExpirable(head); 3349 head.setPreviousExpirable(head); 3350 } 3351 3352 @Override 3353 public Iterator<ReferenceEntry<K, V>> iterator() { 3354 return new AbstractSequentialIterator<ReferenceEntry<K, V>>(peek()) { 3355 @Override 3356 protected ReferenceEntry<K, V> computeNext(ReferenceEntry<K, V> previous) { 3357 ReferenceEntry<K, V> next = previous.getNextExpirable(); 3358 return (next == head) ? 
              null : next;
        }
      };
    }
  }

  static final class CleanupMapTask implements Runnable {
    final WeakReference<MapMakerInternalMap<?, ?>> mapReference;

    public CleanupMapTask(MapMakerInternalMap<?, ?> map) {
      this.mapReference = new WeakReference<MapMakerInternalMap<?, ?>>(map);
    }

    @Override
    public void run() {
      MapMakerInternalMap<?, ?> map = mapReference.get();
      if (map == null) {
        throw new CancellationException();
      }

      for (Segment<?, ?> segment : map.segments) {
        segment.runCleanup();
      }
    }
  }

  // ConcurrentMap methods

  @Override
  public boolean isEmpty() {
    /*
     * Sum per-segment modCounts to avoid mis-reporting when elements are concurrently added and
     * removed in one segment while checking another, in which case the table was never actually
     * empty at any point. (The sum ensures accuracy up through at least 1<<31 per-segment
     * modifications before recheck.) Method containsValue() uses similar constructions for
     * stability checks.
     */
    long sum = 0L;
    Segment<K, V>[] segments = this.segments;
    for (int i = 0; i < segments.length; ++i) {
      if (segments[i].count != 0) {
        return false;
      }
      sum += segments[i].modCount;
    }

    if (sum != 0L) { // recheck unless no modifications
      for (int i = 0; i < segments.length; ++i) {
        if (segments[i].count != 0) {
          return false;
        }
        sum -= segments[i].modCount;
      }
      if (sum != 0L) {
        return false;
      }
    }
    return true;
  }

  @Override
  public int size() {
    Segment<K, V>[] segments = this.segments;
    long sum = 0;
    for (int i = 0; i < segments.length; ++i) {
      sum += segments[i].count;
    }
    return Ints.saturatedCast(sum);
  }

  @Override
  public V get(@Nullable Object key) {
    if (key == null) {
      return null;
    }
    int hash = hash(key);
    return segmentFor(hash).get(key, hash);
  }

  /**
   * Returns the internal entry for the specified key. The entry may be computing, expired, or
   * partially collected. Does not impact recency ordering.
   */
  ReferenceEntry<K, V> getEntry(@Nullable Object key) {
    if (key == null) {
      return null;
    }
    int hash = hash(key);
    return segmentFor(hash).getEntry(key, hash);
  }

  @Override
  public boolean containsKey(@Nullable Object key) {
    if (key == null) {
      return false;
    }
    int hash = hash(key);
    return segmentFor(hash).containsKey(key, hash);
  }

  @Override
  public boolean containsValue(@Nullable Object value) {
    if (value == null) {
      return false;
    }

    // This implementation is patterned after ConcurrentHashMap, but without the locking. The only
    // way for it to return a false negative would be for the target value to jump around in the
    // map such that none of the subsequent iterations observed it, despite the fact that at every
    // point in time it was present somewhere in the map. This becomes increasingly unlikely as
    // CONTAINS_VALUE_RETRIES increases, though without locking it is theoretically possible.
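    // Rough shape of the scan below: up to CONTAINS_VALUE_RETRIES passes over every segment, each
    // summing per-segment modCounts; if a pass finds nothing and its sum matches the previous
    // pass, no structural modifications were observed in between and the miss is accepted without
    // another retry.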
3469 final Segment<K, V>[] segments = this.segments; 3470 long last = -1L; 3471 for (int i = 0; i < CONTAINS_VALUE_RETRIES; i++) { 3472 long sum = 0L; 3473 for (Segment<K, V> segment : segments) { 3474 // ensure visibility of most recent completed write 3475 @SuppressWarnings({"UnusedDeclaration", "unused"}) 3476 int c = segment.count; // read-volatile 3477 3478 AtomicReferenceArray<ReferenceEntry<K, V>> table = segment.table; 3479 for (int j = 0; j < table.length(); j++) { 3480 for (ReferenceEntry<K, V> e = table.get(j); e != null; e = e.getNext()) { 3481 V v = segment.getLiveValue(e); 3482 if (v != null && valueEquivalence.equivalent(value, v)) { 3483 return true; 3484 } 3485 } 3486 } 3487 sum += segment.modCount; 3488 } 3489 if (sum == last) { 3490 break; 3491 } 3492 last = sum; 3493 } 3494 return false; 3495 } 3496 3497 @Override 3498 public V put(K key, V value) { 3499 checkNotNull(key); 3500 checkNotNull(value); 3501 int hash = hash(key); 3502 return segmentFor(hash).put(key, hash, value, false); 3503 } 3504 3505 @Override 3506 public V putIfAbsent(K key, V value) { 3507 checkNotNull(key); 3508 checkNotNull(value); 3509 int hash = hash(key); 3510 return segmentFor(hash).put(key, hash, value, true); 3511 } 3512 3513 @Override 3514 public void putAll(Map<? extends K, ? extends V> m) { 3515 for (Entry<? extends K, ? extends V> e : m.entrySet()) { 3516 put(e.getKey(), e.getValue()); 3517 } 3518 } 3519 3520 @Override 3521 public V remove(@Nullable Object key) { 3522 if (key == null) { 3523 return null; 3524 } 3525 int hash = hash(key); 3526 return segmentFor(hash).remove(key, hash); 3527 } 3528 3529 @Override 3530 public boolean remove(@Nullable Object key, @Nullable Object value) { 3531 if (key == null || value == null) { 3532 return false; 3533 } 3534 int hash = hash(key); 3535 return segmentFor(hash).remove(key, hash, value); 3536 } 3537 3538 @Override 3539 public boolean replace(K key, @Nullable V oldValue, V newValue) { 3540 checkNotNull(key); 3541 checkNotNull(newValue); 3542 if (oldValue == null) { 3543 return false; 3544 } 3545 int hash = hash(key); 3546 return segmentFor(hash).replace(key, hash, oldValue, newValue); 3547 } 3548 3549 @Override 3550 public V replace(K key, V value) { 3551 checkNotNull(key); 3552 checkNotNull(value); 3553 int hash = hash(key); 3554 return segmentFor(hash).replace(key, hash, value); 3555 } 3556 3557 @Override 3558 public void clear() { 3559 for (Segment<K, V> segment : segments) { 3560 segment.clear(); 3561 } 3562 } 3563 3564 transient Set<K> keySet; 3565 3566 @Override 3567 public Set<K> keySet() { 3568 Set<K> ks = keySet; 3569 return (ks != null) ? ks : (keySet = new KeySet()); 3570 } 3571 3572 transient Collection<V> values; 3573 3574 @Override 3575 public Collection<V> values() { 3576 Collection<V> vs = values; 3577 return (vs != null) ? vs : (values = new Values()); 3578 } 3579 3580 transient Set<Entry<K, V>> entrySet; 3581 3582 @Override 3583 public Set<Entry<K, V>> entrySet() { 3584 Set<Entry<K, V>> es = entrySet; 3585 return (es != null) ? 
es : (entrySet = new EntrySet()); 3586 } 3587 3588 // Iterator Support 3589 3590 abstract class HashIterator<E> implements Iterator<E> { 3591 3592 int nextSegmentIndex; 3593 int nextTableIndex; 3594 Segment<K, V> currentSegment; 3595 AtomicReferenceArray<ReferenceEntry<K, V>> currentTable; 3596 ReferenceEntry<K, V> nextEntry; 3597 WriteThroughEntry nextExternal; 3598 WriteThroughEntry lastReturned; 3599 3600 HashIterator() { 3601 nextSegmentIndex = segments.length - 1; 3602 nextTableIndex = -1; 3603 advance(); 3604 } 3605 3606 @Override 3607 public abstract E next(); 3608 3609 final void advance() { 3610 nextExternal = null; 3611 3612 if (nextInChain()) { 3613 return; 3614 } 3615 3616 if (nextInTable()) { 3617 return; 3618 } 3619 3620 while (nextSegmentIndex >= 0) { 3621 currentSegment = segments[nextSegmentIndex--]; 3622 if (currentSegment.count != 0) { 3623 currentTable = currentSegment.table; 3624 nextTableIndex = currentTable.length() - 1; 3625 if (nextInTable()) { 3626 return; 3627 } 3628 } 3629 } 3630 } 3631 3632 /** 3633 * Finds the next entry in the current chain. Returns {@code true} if an entry was found. 3634 */ 3635 boolean nextInChain() { 3636 if (nextEntry != null) { 3637 for (nextEntry = nextEntry.getNext(); nextEntry != null; nextEntry = nextEntry.getNext()) { 3638 if (advanceTo(nextEntry)) { 3639 return true; 3640 } 3641 } 3642 } 3643 return false; 3644 } 3645 3646 /** 3647 * Finds the next entry in the current table. Returns {@code true} if an entry was found. 3648 */ 3649 boolean nextInTable() { 3650 while (nextTableIndex >= 0) { 3651 if ((nextEntry = currentTable.get(nextTableIndex--)) != null) { 3652 if (advanceTo(nextEntry) || nextInChain()) { 3653 return true; 3654 } 3655 } 3656 } 3657 return false; 3658 } 3659 3660 /** 3661 * Advances to the given entry. Returns {@code true} if the entry was valid, {@code false} if it 3662 * should be skipped. 3663 */ 3664 boolean advanceTo(ReferenceEntry<K, V> entry) { 3665 try { 3666 K key = entry.getKey(); 3667 V value = getLiveValue(entry); 3668 if (value != null) { 3669 nextExternal = new WriteThroughEntry(key, value); 3670 return true; 3671 } else { 3672 // Skip stale entry. 3673 return false; 3674 } 3675 } finally { 3676 currentSegment.postReadCleanup(); 3677 } 3678 } 3679 3680 @Override 3681 public boolean hasNext() { 3682 return nextExternal != null; 3683 } 3684 3685 WriteThroughEntry nextEntry() { 3686 if (nextExternal == null) { 3687 throw new NoSuchElementException(); 3688 } 3689 lastReturned = nextExternal; 3690 advance(); 3691 return lastReturned; 3692 } 3693 3694 @Override 3695 public void remove() { 3696 checkRemove(lastReturned != null); 3697 MapMakerInternalMap.this.remove(lastReturned.getKey()); 3698 lastReturned = null; 3699 } 3700 } 3701 3702 final class KeyIterator extends HashIterator<K> { 3703 3704 @Override 3705 public K next() { 3706 return nextEntry().getKey(); 3707 } 3708 } 3709 3710 final class ValueIterator extends HashIterator<V> { 3711 3712 @Override 3713 public V next() { 3714 return nextEntry().getValue(); 3715 } 3716 } 3717 3718 /** 3719 * Custom Entry class used by EntryIterator.next(), that relays setValue changes to the 3720 * underlying map. 
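   *
   * <p>For example (illustrative): if {@code e} is an entry obtained from
   * {@code entrySet().iterator()}, then {@code e.setValue(v)} first performs
   * {@code put(e.getKey(), v)} on the enclosing map and only then updates the entry's own cached
   * value, so the entry keeps reporting the value that was written through.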
3721 */ 3722 final class WriteThroughEntry extends AbstractMapEntry<K, V> { 3723 final K key; // non-null 3724 V value; // non-null 3725 3726 WriteThroughEntry(K key, V value) { 3727 this.key = key; 3728 this.value = value; 3729 } 3730 3731 @Override 3732 public K getKey() { 3733 return key; 3734 } 3735 3736 @Override 3737 public V getValue() { 3738 return value; 3739 } 3740 3741 @Override 3742 public boolean equals(@Nullable Object object) { 3743 // Cannot use key and value equivalence 3744 if (object instanceof Entry) { 3745 Entry<?, ?> that = (Entry<?, ?>) object; 3746 return key.equals(that.getKey()) && value.equals(that.getValue()); 3747 } 3748 return false; 3749 } 3750 3751 @Override 3752 public int hashCode() { 3753 // Cannot use key and value equivalence 3754 return key.hashCode() ^ value.hashCode(); 3755 } 3756 3757 @Override 3758 public V setValue(V newValue) { 3759 V oldValue = put(key, newValue); 3760 value = newValue; // only if put succeeds 3761 return oldValue; 3762 } 3763 } 3764 3765 final class EntryIterator extends HashIterator<Entry<K, V>> { 3766 3767 @Override 3768 public Entry<K, V> next() { 3769 return nextEntry(); 3770 } 3771 } 3772 3773 final class KeySet extends AbstractSet<K> { 3774 3775 @Override 3776 public Iterator<K> iterator() { 3777 return new KeyIterator(); 3778 } 3779 3780 @Override 3781 public int size() { 3782 return MapMakerInternalMap.this.size(); 3783 } 3784 3785 @Override 3786 public boolean isEmpty() { 3787 return MapMakerInternalMap.this.isEmpty(); 3788 } 3789 3790 @Override 3791 public boolean contains(Object o) { 3792 return MapMakerInternalMap.this.containsKey(o); 3793 } 3794 3795 @Override 3796 public boolean remove(Object o) { 3797 return MapMakerInternalMap.this.remove(o) != null; 3798 } 3799 3800 @Override 3801 public void clear() { 3802 MapMakerInternalMap.this.clear(); 3803 } 3804 } 3805 3806 final class Values extends AbstractCollection<V> { 3807 3808 @Override 3809 public Iterator<V> iterator() { 3810 return new ValueIterator(); 3811 } 3812 3813 @Override 3814 public int size() { 3815 return MapMakerInternalMap.this.size(); 3816 } 3817 3818 @Override 3819 public boolean isEmpty() { 3820 return MapMakerInternalMap.this.isEmpty(); 3821 } 3822 3823 @Override 3824 public boolean contains(Object o) { 3825 return MapMakerInternalMap.this.containsValue(o); 3826 } 3827 3828 @Override 3829 public void clear() { 3830 MapMakerInternalMap.this.clear(); 3831 } 3832 } 3833 3834 final class EntrySet extends AbstractSet<Entry<K, V>> { 3835 3836 @Override 3837 public Iterator<Entry<K, V>> iterator() { 3838 return new EntryIterator(); 3839 } 3840 3841 @Override 3842 public boolean contains(Object o) { 3843 if (!(o instanceof Entry)) { 3844 return false; 3845 } 3846 Entry<?, ?> e = (Entry<?, ?>) o; 3847 Object key = e.getKey(); 3848 if (key == null) { 3849 return false; 3850 } 3851 V v = MapMakerInternalMap.this.get(key); 3852 3853 return v != null && valueEquivalence.equivalent(e.getValue(), v); 3854 } 3855 3856 @Override 3857 public boolean remove(Object o) { 3858 if (!(o instanceof Entry)) { 3859 return false; 3860 } 3861 Entry<?, ?> e = (Entry<?, ?>) o; 3862 Object key = e.getKey(); 3863 return key != null && MapMakerInternalMap.this.remove(key, e.getValue()); 3864 } 3865 3866 @Override 3867 public int size() { 3868 return MapMakerInternalMap.this.size(); 3869 } 3870 3871 @Override 3872 public boolean isEmpty() { 3873 return MapMakerInternalMap.this.isEmpty(); 3874 } 3875 3876 @Override 3877 public void clear() { 3878 MapMakerInternalMap.this.clear(); 
3879 } 3880 } 3881 3882 // Serialization Support 3883 3884 private static final long serialVersionUID = 5; 3885 3886 Object writeReplace() { 3887 return new SerializationProxy<K, V>(keyStrength, valueStrength, keyEquivalence, 3888 valueEquivalence, expireAfterWriteNanos, expireAfterAccessNanos, maximumSize, 3889 concurrencyLevel, removalListener, this); 3890 } 3891 3892 /** 3893 * The actual object that gets serialized. Unfortunately, readResolve() doesn't get called when a 3894 * circular dependency is present, so the proxy must be able to behave as the map itself. 3895 */ 3896 abstract static class AbstractSerializationProxy<K, V> 3897 extends ForwardingConcurrentMap<K, V> implements Serializable { 3898 private static final long serialVersionUID = 3; 3899 3900 final Strength keyStrength; 3901 final Strength valueStrength; 3902 final Equivalence<Object> keyEquivalence; 3903 final Equivalence<Object> valueEquivalence; 3904 final long expireAfterWriteNanos; 3905 final long expireAfterAccessNanos; 3906 final int maximumSize; 3907 final int concurrencyLevel; 3908 final RemovalListener<? super K, ? super V> removalListener; 3909 3910 transient ConcurrentMap<K, V> delegate; 3911 3912 AbstractSerializationProxy(Strength keyStrength, Strength valueStrength, 3913 Equivalence<Object> keyEquivalence, Equivalence<Object> valueEquivalence, 3914 long expireAfterWriteNanos, long expireAfterAccessNanos, int maximumSize, 3915 int concurrencyLevel, RemovalListener<? super K, ? super V> removalListener, 3916 ConcurrentMap<K, V> delegate) { 3917 this.keyStrength = keyStrength; 3918 this.valueStrength = valueStrength; 3919 this.keyEquivalence = keyEquivalence; 3920 this.valueEquivalence = valueEquivalence; 3921 this.expireAfterWriteNanos = expireAfterWriteNanos; 3922 this.expireAfterAccessNanos = expireAfterAccessNanos; 3923 this.maximumSize = maximumSize; 3924 this.concurrencyLevel = concurrencyLevel; 3925 this.removalListener = removalListener; 3926 this.delegate = delegate; 3927 } 3928 3929 @Override 3930 protected ConcurrentMap<K, V> delegate() { 3931 return delegate; 3932 } 3933 3934 void writeMapTo(ObjectOutputStream out) throws IOException { 3935 out.writeInt(delegate.size()); 3936 for (Entry<K, V> entry : delegate.entrySet()) { 3937 out.writeObject(entry.getKey()); 3938 out.writeObject(entry.getValue()); 3939 } 3940 out.writeObject(null); // terminate entries 3941 } 3942 3943 @SuppressWarnings("deprecation") // serialization of deprecated feature 3944 MapMaker readMapMaker(ObjectInputStream in) throws IOException { 3945 int size = in.readInt(); 3946 MapMaker mapMaker = new MapMaker() 3947 .initialCapacity(size) 3948 .setKeyStrength(keyStrength) 3949 .setValueStrength(valueStrength) 3950 .keyEquivalence(keyEquivalence) 3951 .concurrencyLevel(concurrencyLevel); 3952 mapMaker.removalListener(removalListener); 3953 if (expireAfterWriteNanos > 0) { 3954 mapMaker.expireAfterWrite(expireAfterWriteNanos, TimeUnit.NANOSECONDS); 3955 } 3956 if (expireAfterAccessNanos > 0) { 3957 mapMaker.expireAfterAccess(expireAfterAccessNanos, TimeUnit.NANOSECONDS); 3958 } 3959 if (maximumSize != MapMaker.UNSET_INT) { 3960 mapMaker.maximumSize(maximumSize); 3961 } 3962 return mapMaker; 3963 } 3964 3965 @SuppressWarnings("unchecked") 3966 void readEntries(ObjectInputStream in) throws IOException, ClassNotFoundException { 3967 while (true) { 3968 K key = (K) in.readObject(); 3969 if (key == null) { 3970 break; // terminator 3971 } 3972 V value = (V) in.readObject(); 3973 delegate.put(key, value); 3974 } 3975 } 3976 } 3977 3978 
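  /*
   * A minimal serialization round trip, sketched with the public MapMaker and java.io APIs; the
   * stream variables below are illustrative placeholders, not part of this class:
   *
   *   ConcurrentMap<String, Integer> map = new MapMaker().concurrencyLevel(4).makeMap();
   *   map.put("a", 1);
   *
   *   // Writing the map actually writes the SerializationProxy returned by writeReplace(),
   *   // which records the maker configuration and then the entries via writeMapTo().
   *   ObjectOutputStream out = new ObjectOutputStream(someOutputStream);
   *   out.writeObject(map);
   *
   *   // Reading it back runs SerializationProxy.readObject(), which rebuilds an equivalently
   *   // configured map with readMapMaker() and repopulates it with readEntries();
   *   // readResolve() then returns that rebuilt map in place of the proxy.
   *   ObjectInputStream in = new ObjectInputStream(someInputStream);
   *   @SuppressWarnings("unchecked")
   *   ConcurrentMap<String, Integer> copy = (ConcurrentMap<String, Integer>) in.readObject();
   */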
/** 3979 * The actual object that gets serialized. Unfortunately, readResolve() doesn't get called when a 3980 * circular dependency is present, so the proxy must be able to behave as the map itself. 3981 */ 3982 private static final class SerializationProxy<K, V> extends AbstractSerializationProxy<K, V> { 3983 private static final long serialVersionUID = 3; 3984 3985 SerializationProxy(Strength keyStrength, Strength valueStrength, 3986 Equivalence<Object> keyEquivalence, Equivalence<Object> valueEquivalence, 3987 long expireAfterWriteNanos, long expireAfterAccessNanos, int maximumSize, 3988 int concurrencyLevel, RemovalListener<? super K, ? super V> removalListener, 3989 ConcurrentMap<K, V> delegate) { 3990 super(keyStrength, valueStrength, keyEquivalence, valueEquivalence, expireAfterWriteNanos, 3991 expireAfterAccessNanos, maximumSize, concurrencyLevel, removalListener, delegate); 3992 } 3993 3994 private void writeObject(ObjectOutputStream out) throws IOException { 3995 out.defaultWriteObject(); 3996 writeMapTo(out); 3997 } 3998 3999 private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { 4000 in.defaultReadObject(); 4001 MapMaker mapMaker = readMapMaker(in); 4002 delegate = mapMaker.makeMap(); 4003 readEntries(in); 4004 } 4005 4006 private Object readResolve() { 4007 return delegate; 4008 } 4009 } 4010 } 4011