/*
 * Copyright (C) 2009 The Guava Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */

package com.google.common.collect;

import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Equivalence;
import com.google.common.base.Equivalences;
import com.google.common.base.Ticker;
import com.google.common.collect.GenericMapMaker.NullListener;
import com.google.common.collect.MapMaker.RemovalCause;
import com.google.common.collect.MapMaker.RemovalListener;
import com.google.common.collect.MapMaker.RemovalNotification;
import com.google.common.primitives.Ints;

import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.lang.ref.WeakReference;
import java.util.AbstractCollection;
import java.util.AbstractMap;
import java.util.AbstractQueue;
import java.util.AbstractSet;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.concurrent.locks.ReentrantLock;
import java.util.logging.Level;
import java.util.logging.Logger;

import javax.annotation.Nullable;
import javax.annotation.concurrent.GuardedBy;

/**
 * The concurrent hash map implementation built by {@link MapMaker}.
 *
 * <p>This implementation is heavily derived from revision 1.96 of <a
 * href="http://tinyurl.com/ConcurrentHashMap">ConcurrentHashMap.java</a>.
 *
 * @author Bob Lee
 * @author Charles Fry
 * @author Doug Lea ({@code ConcurrentHashMap})
 */
class MapMakerInternalMap<K, V>
    extends AbstractMap<K, V> implements ConcurrentMap<K, V>, Serializable {

  /*
   * The basic strategy is to subdivide the table among Segments, each of which itself is a
   * concurrently readable hash table. The map supports non-blocking reads and concurrent writes
   * across different segments.
   *
   * If a maximum size is specified, a best-effort bounding is performed per segment, using a
   * page-replacement algorithm to determine which entries to evict when the capacity has been
   * exceeded.
   *
   * The page replacement algorithm's data structures are kept casually consistent with the map.
   * The ordering of writes to a segment is sequentially consistent. An update to the map and
   * recording of reads may not be immediately reflected on the algorithm's data structures. These
   * structures are guarded by a lock and operations are applied in batches to avoid lock
   * contention. The penalty of applying the batches is spread across threads so that the
   * amortized cost is slightly higher than performing just the operation without enforcing the
   * capacity constraint.
   *
   * This implementation uses a per-segment queue to record a memento of the additions, removals,
   * and accesses that were performed on the map. The queue is drained on writes and when it
   * exceeds its capacity threshold.
   *
   * The Least Recently Used page replacement algorithm was chosen due to its simplicity, high hit
   * rate, and ability to be implemented with O(1) time complexity. The initial LRU implementation
   * operates per-segment rather than globally for increased implementation simplicity. We expect
   * the cache hit rate to be similar to that of a global LRU algorithm.
   */

  // Constants

  /**
   * The maximum capacity, used if a higher value is implicitly specified by either of the
   * constructors with arguments. MUST be a power of two <= 1<<30 to ensure that entries are
   * indexable using ints.
   */
  static final int MAXIMUM_CAPACITY = Ints.MAX_POWER_OF_TWO;

  /** The maximum number of segments to allow; used to bound constructor arguments. */
  static final int MAX_SEGMENTS = 1 << 16; // slightly conservative

  /** Number of (unsynchronized) retries in the containsValue method. */
  static final int CONTAINS_VALUE_RETRIES = 3;

  /**
   * Number of cache access operations that can be buffered per segment before the cache's recency
   * ordering information is updated. This is used to avoid lock contention by recording a memento
   * of reads and delaying a lock acquisition until the threshold is crossed or a mutation occurs.
   *
   * <p>This must be a (2^n)-1 as it is used as a mask.
   */
  static final int DRAIN_THRESHOLD = 0x3F;

  /**
   * Maximum number of entries to be drained in a single cleanup run. This applies independently to
   * the cleanup queue and both reference queues.
   */
  // TODO(fry): empirically optimize this
  static final int DRAIN_MAX = 16;

  static final long CLEANUP_EXECUTOR_DELAY_SECS = 60;

  // Fields

  private static final Logger logger = Logger.getLogger(MapMakerInternalMap.class.getName());

  /**
   * Mask value for indexing into segments. The upper bits of a key's hash code are used to choose
   * the segment.
   */
  final transient int segmentMask;

  /**
   * Shift value for indexing within segments. Helps prevent entries that end up in the same
   * segment from also ending up in the same bucket.
   */
  final transient int segmentShift;

  /** The segments, each of which is a specialized hash table. */
  final transient Segment<K, V>[] segments;

  /** The concurrency level. */
  final int concurrencyLevel;

  /** Strategy for comparing keys. */
  final Equivalence<Object> keyEquivalence;

  /** Strategy for comparing values. */
  final Equivalence<Object> valueEquivalence;

  /** Strategy for referencing keys. */
  final Strength keyStrength;

  /** Strategy for referencing values. */
  final Strength valueStrength;

  /** The maximum size of this map. MapMaker.UNSET_INT if there is no maximum. */
  final int maximumSize;

  /** How long after the last access to an entry the map will retain that entry. */
  final long expireAfterAccessNanos;

  /** How long after the last write to an entry the map will retain that entry. */
  final long expireAfterWriteNanos;

  /** Entries waiting to be consumed by the removal listener. */
  // TODO(fry): define a new type which creates event objects and automates the clear logic
  final Queue<RemovalNotification<K, V>> removalNotificationQueue;

  /**
   * A listener that is invoked when an entry is removed due to expiration or garbage collection of
   * soft/weak entries.
   */
  final RemovalListener<K, V> removalListener;

  /** Factory used to create new entries. */
  final transient EntryFactory entryFactory;

  /** Measures time in a testable way. */
  final Ticker ticker;

  /**
   * Creates a new, empty map with the specified strategy, initial capacity and concurrency level.
   */
  MapMakerInternalMap(MapMaker builder) {
    concurrencyLevel = Math.min(builder.getConcurrencyLevel(), MAX_SEGMENTS);

    keyStrength = builder.getKeyStrength();
    valueStrength = builder.getValueStrength();

    keyEquivalence = builder.getKeyEquivalence();
    valueEquivalence = builder.getValueEquivalence();

    maximumSize = builder.maximumSize;
    expireAfterAccessNanos = builder.getExpireAfterAccessNanos();
    expireAfterWriteNanos = builder.getExpireAfterWriteNanos();

    entryFactory = EntryFactory.getFactory(keyStrength, expires(), evictsBySize());
    ticker = builder.getTicker();

    removalListener = builder.getRemovalListener();
    removalNotificationQueue = (removalListener == NullListener.INSTANCE)
        ? MapMakerInternalMap.<RemovalNotification<K, V>>discardingQueue()
        : new ConcurrentLinkedQueue<RemovalNotification<K, V>>();

    int initialCapacity = Math.min(builder.getInitialCapacity(), MAXIMUM_CAPACITY);
    if (evictsBySize()) {
      initialCapacity = Math.min(initialCapacity, maximumSize);
    }

    // Find power-of-two sizes best matching arguments. Constraints:
    // (segmentCount <= maximumSize)
    // && (concurrencyLevel > maximumSize || segmentCount > concurrencyLevel)
    int segmentShift = 0;
    int segmentCount = 1;
    while (segmentCount < concurrencyLevel
        && (!evictsBySize() || segmentCount * 2 <= maximumSize)) {
      ++segmentShift;
      segmentCount <<= 1;
    }
    this.segmentShift = 32 - segmentShift;
    segmentMask = segmentCount - 1;

    this.segments = newSegmentArray(segmentCount);

    int segmentCapacity = initialCapacity / segmentCount;
    if (segmentCapacity * segmentCount < initialCapacity) {
      ++segmentCapacity;
    }

    int segmentSize = 1;
    while (segmentSize < segmentCapacity) {
      segmentSize <<= 1;
    }

    if (evictsBySize()) {
      // Ensure sum of segment max sizes = overall max size
      int maximumSegmentSize = maximumSize / segmentCount + 1;
      int remainder = maximumSize % segmentCount;
      for (int i = 0; i < this.segments.length; ++i) {
        if (i == remainder) {
          maximumSegmentSize--;
        }
        this.segments[i] = createSegment(segmentSize, maximumSegmentSize);
      }
    } else {
      for (int i = 0; i < this.segments.length; ++i) {
        this.segments[i] = createSegment(segmentSize, MapMaker.UNSET_INT);
      }
    }
  }

  boolean evictsBySize() {
    return maximumSize != MapMaker.UNSET_INT;
  }

  boolean expires() {
    return expiresAfterWrite() || expiresAfterAccess();
  }

  boolean expiresAfterWrite() {
    return expireAfterWriteNanos > 0;
  }

  boolean expiresAfterAccess() {
    return expireAfterAccessNanos > 0;
  }

  boolean usesKeyReferences() {
    return keyStrength != Strength.STRONG;
  }

  boolean usesValueReferences() {
    return valueStrength != Strength.STRONG;
  }

  enum Strength {
    /*
     * TODO(kevinb): If we strongly reference the value and aren't computing, we needn't wrap the
     * value. This could save ~8 bytes per entry.
     */

    STRONG {
      @Override
      <K, V> ValueReference<K, V> referenceValue(
          Segment<K, V> segment, ReferenceEntry<K, V> entry, V value) {
        return new StrongValueReference<K, V>(value);
      }

      @Override
      Equivalence<Object> defaultEquivalence() {
        return Equivalences.equals();
      }
    },

    SOFT {
      @Override
      <K, V> ValueReference<K, V> referenceValue(
          Segment<K, V> segment, ReferenceEntry<K, V> entry, V value) {
        return new SoftValueReference<K, V>(segment.valueReferenceQueue, value, entry);
      }

      @Override
      Equivalence<Object> defaultEquivalence() {
        return Equivalences.identity();
      }
    },

    WEAK {
      @Override
      <K, V> ValueReference<K, V> referenceValue(
          Segment<K, V> segment, ReferenceEntry<K, V> entry, V value) {
        return new WeakValueReference<K, V>(segment.valueReferenceQueue, value, entry);
      }

      @Override
      Equivalence<Object> defaultEquivalence() {
        return Equivalences.identity();
      }
    };

    /**
     * Creates a reference for the given value according to this value strength.
337 */ referenceValue( Segment<K, V> segment, ReferenceEntry<K, V> entry, V value)338 abstract <K, V> ValueReference<K, V> referenceValue( 339 Segment<K, V> segment, ReferenceEntry<K, V> entry, V value); 340 341 /** 342 * Returns the default equivalence strategy used to compare and hash keys or values referenced 343 * at this strength. This strategy will be used unless the user explicitly specifies an 344 * alternate strategy. 345 */ defaultEquivalence()346 abstract Equivalence<Object> defaultEquivalence(); 347 } 348 349 /** 350 * Creates new entries. 351 */ 352 enum EntryFactory { 353 STRONG { 354 @Override newEntry( Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next)355 <K, V> ReferenceEntry<K, V> newEntry( 356 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 357 return new StrongEntry<K, V>(key, hash, next); 358 } 359 }, 360 STRONG_EXPIRABLE { 361 @Override newEntry( Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next)362 <K, V> ReferenceEntry<K, V> newEntry( 363 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 364 return new StrongExpirableEntry<K, V>(key, hash, next); 365 } 366 367 @Override copyEntry( Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext)368 <K, V> ReferenceEntry<K, V> copyEntry( 369 Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 370 ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext); 371 copyExpirableEntry(original, newEntry); 372 return newEntry; 373 } 374 }, 375 STRONG_EVICTABLE { 376 @Override newEntry( Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next)377 <K, V> ReferenceEntry<K, V> newEntry( 378 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 379 return new StrongEvictableEntry<K, V>(key, hash, next); 380 } 381 382 @Override copyEntry( Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext)383 <K, V> ReferenceEntry<K, V> copyEntry( 384 Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 385 ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext); 386 copyEvictableEntry(original, newEntry); 387 return newEntry; 388 } 389 }, 390 STRONG_EXPIRABLE_EVICTABLE { 391 @Override newEntry( Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next)392 <K, V> ReferenceEntry<K, V> newEntry( 393 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 394 return new StrongExpirableEvictableEntry<K, V>(key, hash, next); 395 } 396 397 @Override copyEntry( Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext)398 <K, V> ReferenceEntry<K, V> copyEntry( 399 Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 400 ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext); 401 copyExpirableEntry(original, newEntry); 402 copyEvictableEntry(original, newEntry); 403 return newEntry; 404 } 405 }, 406 407 SOFT { 408 @Override newEntry( Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next)409 <K, V> ReferenceEntry<K, V> newEntry( 410 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 411 return new SoftEntry<K, V>(segment.keyReferenceQueue, key, hash, next); 412 } 413 }, 414 SOFT_EXPIRABLE { 415 @Override newEntry( Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> 
next)416 <K, V> ReferenceEntry<K, V> newEntry( 417 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 418 return new SoftExpirableEntry<K, V>(segment.keyReferenceQueue, key, hash, next); 419 } 420 421 @Override copyEntry( Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext)422 <K, V> ReferenceEntry<K, V> copyEntry( 423 Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 424 ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext); 425 copyExpirableEntry(original, newEntry); 426 return newEntry; 427 } 428 }, 429 SOFT_EVICTABLE { 430 @Override newEntry( Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next)431 <K, V> ReferenceEntry<K, V> newEntry( 432 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 433 return new SoftEvictableEntry<K, V>(segment.keyReferenceQueue, key, hash, next); 434 } 435 436 @Override copyEntry( Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext)437 <K, V> ReferenceEntry<K, V> copyEntry( 438 Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 439 ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext); 440 copyEvictableEntry(original, newEntry); 441 return newEntry; 442 } 443 }, 444 SOFT_EXPIRABLE_EVICTABLE { 445 @Override newEntry( Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next)446 <K, V> ReferenceEntry<K, V> newEntry( 447 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 448 return new SoftExpirableEvictableEntry<K, V>(segment.keyReferenceQueue, key, hash, next); 449 } 450 451 @Override copyEntry( Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext)452 <K, V> ReferenceEntry<K, V> copyEntry( 453 Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 454 ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext); 455 copyExpirableEntry(original, newEntry); 456 copyEvictableEntry(original, newEntry); 457 return newEntry; 458 } 459 }, 460 461 WEAK { 462 @Override newEntry( Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next)463 <K, V> ReferenceEntry<K, V> newEntry( 464 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 465 return new WeakEntry<K, V>(segment.keyReferenceQueue, key, hash, next); 466 } 467 }, 468 WEAK_EXPIRABLE { 469 @Override newEntry( Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next)470 <K, V> ReferenceEntry<K, V> newEntry( 471 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 472 return new WeakExpirableEntry<K, V>(segment.keyReferenceQueue, key, hash, next); 473 } 474 475 @Override copyEntry( Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext)476 <K, V> ReferenceEntry<K, V> copyEntry( 477 Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 478 ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext); 479 copyExpirableEntry(original, newEntry); 480 return newEntry; 481 } 482 }, 483 WEAK_EVICTABLE { 484 @Override newEntry( Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next)485 <K, V> ReferenceEntry<K, V> newEntry( 486 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 487 return new WeakEvictableEntry<K, V>(segment.keyReferenceQueue, 
key, hash, next); 488 } 489 490 @Override copyEntry( Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext)491 <K, V> ReferenceEntry<K, V> copyEntry( 492 Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 493 ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext); 494 copyEvictableEntry(original, newEntry); 495 return newEntry; 496 } 497 }, 498 WEAK_EXPIRABLE_EVICTABLE { 499 @Override newEntry( Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next)500 <K, V> ReferenceEntry<K, V> newEntry( 501 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 502 return new WeakExpirableEvictableEntry<K, V>(segment.keyReferenceQueue, key, hash, next); 503 } 504 505 @Override copyEntry( Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext)506 <K, V> ReferenceEntry<K, V> copyEntry( 507 Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 508 ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext); 509 copyExpirableEntry(original, newEntry); 510 copyEvictableEntry(original, newEntry); 511 return newEntry; 512 } 513 }; 514 515 /** 516 * Masks used to compute indices in the following table. 517 */ 518 static final int EXPIRABLE_MASK = 1; 519 static final int EVICTABLE_MASK = 2; 520 521 /** 522 * Look-up table for factories. First dimension is the reference type. The second dimension is 523 * the result of OR-ing the feature masks. 524 */ 525 static final EntryFactory[][] factories = { 526 { STRONG, STRONG_EXPIRABLE, STRONG_EVICTABLE, STRONG_EXPIRABLE_EVICTABLE }, 527 { SOFT, SOFT_EXPIRABLE, SOFT_EVICTABLE, SOFT_EXPIRABLE_EVICTABLE }, 528 { WEAK, WEAK_EXPIRABLE, WEAK_EVICTABLE, WEAK_EXPIRABLE_EVICTABLE } 529 }; 530 getFactory(Strength keyStrength, boolean expireAfterWrite, boolean evictsBySize)531 static EntryFactory getFactory(Strength keyStrength, boolean expireAfterWrite, 532 boolean evictsBySize) { 533 int flags = (expireAfterWrite ? EXPIRABLE_MASK : 0) | (evictsBySize ? EVICTABLE_MASK : 0); 534 return factories[keyStrength.ordinal()][flags]; 535 } 536 537 /** 538 * Creates a new entry. 539 * 540 * @param segment to create the entry for 541 * @param key of the entry 542 * @param hash of the key 543 * @param next entry in the same bucket 544 */ newEntry( Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next)545 abstract <K, V> ReferenceEntry<K, V> newEntry( 546 Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next); 547 548 /** 549 * Copies an entry, assigning it a new {@code next} entry. 550 * 551 * @param original the entry to copy 552 * @param newNext entry in the same bucket 553 */ 554 @GuardedBy("Segment.this") copyEntry( Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext)555 <K, V> ReferenceEntry<K, V> copyEntry( 556 Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 557 return newEntry(segment, original.getKey(), original.getHash(), newNext); 558 } 559 560 @GuardedBy("Segment.this") copyExpirableEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry)561 <K, V> void copyExpirableEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry) { 562 // TODO(fry): when we link values instead of entries this method can go 563 // away, as can connectExpirables, nullifyExpirable. 
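      // Splice the copy into the expiration list in place of the original: carry over the
      // expiration time, link the original's neighbors to the new entry, and then null out the
      // original's links so the original entry is fully detached from the list.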
      newEntry.setExpirationTime(original.getExpirationTime());

      connectExpirables(original.getPreviousExpirable(), newEntry);
      connectExpirables(newEntry, original.getNextExpirable());

      nullifyExpirable(original);
    }

    @GuardedBy("Segment.this")
    <K, V> void copyEvictableEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry) {
      // TODO(fry): when we link values instead of entries this method can go
      // away, as can connectEvictables, nullifyEvictable.
      connectEvictables(original.getPreviousEvictable(), newEntry);
      connectEvictables(newEntry, original.getNextEvictable());

      nullifyEvictable(original);
    }
  }

  /**
   * A reference to a value.
   */
  interface ValueReference<K, V> {
    /**
     * Gets the value. Does not block or throw exceptions.
     */
    V get();

    /**
     * Waits for a value that may still be computing. Unlike get(), this method can block (in the
     * case of FutureValueReference).
     *
     * @throws ExecutionException if the computing thread throws an exception
     */
    V waitForValue() throws ExecutionException;

    /**
     * Returns the entry associated with this value reference, or {@code null} if this value
     * reference is independent of any entry.
     */
    ReferenceEntry<K, V> getEntry();

    /**
     * Creates a copy of this reference for the given entry.
     */
    ValueReference<K, V> copyFor(ReferenceQueue<V> queue, ReferenceEntry<K, V> entry);

    /**
     * Clears this reference object.
     *
     * @param newValue the new value reference which will replace this one; this is only used
     *     during computation to immediately notify blocked threads of the new value
     */
    void clear(@Nullable ValueReference<K, V> newValue);

    /**
     * Returns {@code true} if the value type is a computing reference (regardless of whether or
     * not computation has completed). This is necessary to distinguish between partially-collected
     * entries and computing entries, which need to be cleaned up differently.
     */
    boolean isComputingReference();
  }

  /**
   * Placeholder. Indicates that the value hasn't been set yet.
   */
  static final ValueReference<Object, Object> UNSET = new ValueReference<Object, Object>() {
    @Override
    public Object get() {
      return null;
    }

    @Override
    public ReferenceEntry<Object, Object> getEntry() {
      return null;
    }

    @Override
    public ValueReference<Object, Object> copyFor(
        ReferenceQueue<Object> queue, ReferenceEntry<Object, Object> entry) {
      return this;
    }

    @Override
    public boolean isComputingReference() {
      return false;
    }

    @Override
    public Object waitForValue() {
      return null;
    }

    @Override
    public void clear(ValueReference<Object, Object> newValue) {}
  };

  /**
   * Singleton placeholder that indicates a value is being computed.
   */
  @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
  static <K, V> ValueReference<K, V> unset() {
    return (ValueReference<K, V>) UNSET;
  }

  /**
   * An entry in a reference map.
   *
   * Entries in the map can be in the following states:
   *
   * Valid:
   * - Live: valid key/value are set
   * - Computing: computation is pending
   *
   * Invalid:
   * - Expired: time expired (key/value may still be set)
   * - Collected: key/value was partially collected, but not yet cleaned up
   */
  interface ReferenceEntry<K, V> {
    /**
     * Gets the value reference from this entry.
     */
    ValueReference<K, V> getValueReference();

    /**
     * Sets the value reference for this entry.
     */
    void setValueReference(ValueReference<K, V> valueReference);

    /**
     * Gets the next entry in the chain.
     */
    ReferenceEntry<K, V> getNext();

    /**
     * Gets the entry's hash.
     */
    int getHash();

    /**
     * Gets the key for this entry.
     */
    K getKey();

    /*
     * Used by entries that are expirable. Expirable entries are maintained in a doubly-linked
     * list. New entries are added at the tail of the list at write time; stale entries are
     * expired from the head of the list.
     */

    /**
     * Gets the entry expiration time in ns.
     */
    long getExpirationTime();

    /**
     * Sets the entry expiration time in ns.
     */
    void setExpirationTime(long time);

    /**
     * Gets the next entry in the expiration list.
     */
    ReferenceEntry<K, V> getNextExpirable();

    /**
     * Sets the next entry in the expiration list.
     */
    void setNextExpirable(ReferenceEntry<K, V> next);

    /**
     * Gets the previous entry in the expiration list.
     */
    ReferenceEntry<K, V> getPreviousExpirable();

    /**
     * Sets the previous entry in the expiration list.
     */
    void setPreviousExpirable(ReferenceEntry<K, V> previous);

    /*
     * Implemented by entries that are evictable. Evictable entries are maintained in a
     * doubly-linked list. New entries are added at the tail of the list at write time and stale
     * entries are evicted from the head of the list.
     */

    /**
     * Gets the next entry in the recency list.
     */
    ReferenceEntry<K, V> getNextEvictable();

    /**
     * Sets the next entry in the recency list.
     */
    void setNextEvictable(ReferenceEntry<K, V> next);

    /**
     * Gets the previous entry in the recency list.
     */
    ReferenceEntry<K, V> getPreviousEvictable();

    /**
     * Sets the previous entry in the recency list.
767 */ setPreviousEvictable(ReferenceEntry<K, V> previous)768 void setPreviousEvictable(ReferenceEntry<K, V> previous); 769 } 770 771 private enum NullEntry implements ReferenceEntry<Object, Object> { 772 INSTANCE; 773 774 @Override getValueReference()775 public ValueReference<Object, Object> getValueReference() { 776 return null; 777 } 778 779 @Override setValueReference(ValueReference<Object, Object> valueReference)780 public void setValueReference(ValueReference<Object, Object> valueReference) {} 781 782 @Override getNext()783 public ReferenceEntry<Object, Object> getNext() { 784 return null; 785 } 786 787 @Override getHash()788 public int getHash() { 789 return 0; 790 } 791 792 @Override getKey()793 public Object getKey() { 794 return null; 795 } 796 797 @Override getExpirationTime()798 public long getExpirationTime() { 799 return 0; 800 } 801 802 @Override setExpirationTime(long time)803 public void setExpirationTime(long time) {} 804 805 @Override getNextExpirable()806 public ReferenceEntry<Object, Object> getNextExpirable() { 807 return this; 808 } 809 810 @Override setNextExpirable(ReferenceEntry<Object, Object> next)811 public void setNextExpirable(ReferenceEntry<Object, Object> next) {} 812 813 @Override getPreviousExpirable()814 public ReferenceEntry<Object, Object> getPreviousExpirable() { 815 return this; 816 } 817 818 @Override setPreviousExpirable(ReferenceEntry<Object, Object> previous)819 public void setPreviousExpirable(ReferenceEntry<Object, Object> previous) {} 820 821 @Override getNextEvictable()822 public ReferenceEntry<Object, Object> getNextEvictable() { 823 return this; 824 } 825 826 @Override setNextEvictable(ReferenceEntry<Object, Object> next)827 public void setNextEvictable(ReferenceEntry<Object, Object> next) {} 828 829 @Override getPreviousEvictable()830 public ReferenceEntry<Object, Object> getPreviousEvictable() { 831 return this; 832 } 833 834 @Override setPreviousEvictable(ReferenceEntry<Object, Object> previous)835 public void setPreviousEvictable(ReferenceEntry<Object, Object> previous) {} 836 } 837 838 static abstract class AbstractReferenceEntry<K, V> implements ReferenceEntry<K, V> { 839 @Override getValueReference()840 public ValueReference<K, V> getValueReference() { 841 throw new UnsupportedOperationException(); 842 } 843 844 @Override setValueReference(ValueReference<K, V> valueReference)845 public void setValueReference(ValueReference<K, V> valueReference) { 846 throw new UnsupportedOperationException(); 847 } 848 849 @Override getNext()850 public ReferenceEntry<K, V> getNext() { 851 throw new UnsupportedOperationException(); 852 } 853 854 @Override getHash()855 public int getHash() { 856 throw new UnsupportedOperationException(); 857 } 858 859 @Override getKey()860 public K getKey() { 861 throw new UnsupportedOperationException(); 862 } 863 864 @Override getExpirationTime()865 public long getExpirationTime() { 866 throw new UnsupportedOperationException(); 867 } 868 869 @Override setExpirationTime(long time)870 public void setExpirationTime(long time) { 871 throw new UnsupportedOperationException(); 872 } 873 874 @Override getNextExpirable()875 public ReferenceEntry<K, V> getNextExpirable() { 876 throw new UnsupportedOperationException(); 877 } 878 879 @Override setNextExpirable(ReferenceEntry<K, V> next)880 public void setNextExpirable(ReferenceEntry<K, V> next) { 881 throw new UnsupportedOperationException(); 882 } 883 884 @Override getPreviousExpirable()885 public ReferenceEntry<K, V> getPreviousExpirable() { 886 throw new 
UnsupportedOperationException(); 887 } 888 889 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)890 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 891 throw new UnsupportedOperationException(); 892 } 893 894 @Override getNextEvictable()895 public ReferenceEntry<K, V> getNextEvictable() { 896 throw new UnsupportedOperationException(); 897 } 898 899 @Override setNextEvictable(ReferenceEntry<K, V> next)900 public void setNextEvictable(ReferenceEntry<K, V> next) { 901 throw new UnsupportedOperationException(); 902 } 903 904 @Override getPreviousEvictable()905 public ReferenceEntry<K, V> getPreviousEvictable() { 906 throw new UnsupportedOperationException(); 907 } 908 909 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)910 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 911 throw new UnsupportedOperationException(); 912 } 913 } 914 915 @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value nullEntry()916 static <K, V> ReferenceEntry<K, V> nullEntry() { 917 return (ReferenceEntry<K, V>) NullEntry.INSTANCE; 918 } 919 920 static final Queue<? extends Object> DISCARDING_QUEUE = new AbstractQueue<Object>() { 921 @Override 922 public boolean offer(Object o) { 923 return true; 924 } 925 926 @Override 927 public Object peek() { 928 return null; 929 } 930 931 @Override 932 public Object poll() { 933 return null; 934 } 935 936 @Override 937 public int size() { 938 return 0; 939 } 940 941 @Override 942 public Iterator<Object> iterator() { 943 return Iterators.emptyIterator(); 944 } 945 }; 946 947 /** 948 * Queue that discards all elements. 949 */ 950 @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value discardingQueue()951 static <E> Queue<E> discardingQueue() { 952 return (Queue) DISCARDING_QUEUE; 953 } 954 955 /* 956 * Note: All of this duplicate code sucks, but it saves a lot of memory. If only Java had mixins! 957 * To maintain this code, make a change for the strong reference type. Then, cut and paste, and 958 * replace "Strong" with "Soft" or "Weak" within the pasted text. The primary difference is that 959 * strong entries store the key reference directly while soft and weak entries delegate to their 960 * respective superclasses. 961 */ 962 963 /** 964 * Used for strongly-referenced keys. 
965 */ 966 static class StrongEntry<K, V> implements ReferenceEntry<K, V> { 967 final K key; 968 StrongEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next)969 StrongEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 970 this.key = key; 971 this.hash = hash; 972 this.next = next; 973 } 974 975 @Override getKey()976 public K getKey() { 977 return this.key; 978 } 979 980 // null expiration 981 982 @Override getExpirationTime()983 public long getExpirationTime() { 984 throw new UnsupportedOperationException(); 985 } 986 987 @Override setExpirationTime(long time)988 public void setExpirationTime(long time) { 989 throw new UnsupportedOperationException(); 990 } 991 992 @Override getNextExpirable()993 public ReferenceEntry<K, V> getNextExpirable() { 994 throw new UnsupportedOperationException(); 995 } 996 997 @Override setNextExpirable(ReferenceEntry<K, V> next)998 public void setNextExpirable(ReferenceEntry<K, V> next) { 999 throw new UnsupportedOperationException(); 1000 } 1001 1002 @Override getPreviousExpirable()1003 public ReferenceEntry<K, V> getPreviousExpirable() { 1004 throw new UnsupportedOperationException(); 1005 } 1006 1007 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1008 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1009 throw new UnsupportedOperationException(); 1010 } 1011 1012 // null eviction 1013 1014 @Override getNextEvictable()1015 public ReferenceEntry<K, V> getNextEvictable() { 1016 throw new UnsupportedOperationException(); 1017 } 1018 1019 @Override setNextEvictable(ReferenceEntry<K, V> next)1020 public void setNextEvictable(ReferenceEntry<K, V> next) { 1021 throw new UnsupportedOperationException(); 1022 } 1023 1024 @Override getPreviousEvictable()1025 public ReferenceEntry<K, V> getPreviousEvictable() { 1026 throw new UnsupportedOperationException(); 1027 } 1028 1029 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1030 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1031 throw new UnsupportedOperationException(); 1032 } 1033 1034 // The code below is exactly the same for each entry type. 1035 1036 final int hash; 1037 final ReferenceEntry<K, V> next; 1038 volatile ValueReference<K, V> valueReference = unset(); 1039 1040 @Override getValueReference()1041 public ValueReference<K, V> getValueReference() { 1042 return valueReference; 1043 } 1044 1045 @Override setValueReference(ValueReference<K, V> valueReference)1046 public void setValueReference(ValueReference<K, V> valueReference) { 1047 ValueReference<K, V> previous = this.valueReference; 1048 this.valueReference = valueReference; 1049 previous.clear(valueReference); 1050 } 1051 1052 @Override getHash()1053 public int getHash() { 1054 return hash; 1055 } 1056 1057 @Override getNext()1058 public ReferenceEntry<K, V> getNext() { 1059 return next; 1060 } 1061 } 1062 1063 static final class StrongExpirableEntry<K, V> extends StrongEntry<K, V> 1064 implements ReferenceEntry<K, V> { StrongExpirableEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next)1065 StrongExpirableEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1066 super(key, hash, next); 1067 } 1068 1069 // The code below is exactly the same for each expirable entry type. 
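    // Each expirable entry carries an expiration timestamp plus previous/next links for the
    // segment's doubly-linked expiration list. The links are only touched while holding the
    // segment lock (hence @GuardedBy("Segment.this")), while 'time' is volatile so it can be
    // read without holding the lock.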
1070 1071 volatile long time = Long.MAX_VALUE; 1072 1073 @Override getExpirationTime()1074 public long getExpirationTime() { 1075 return time; 1076 } 1077 1078 @Override setExpirationTime(long time)1079 public void setExpirationTime(long time) { 1080 this.time = time; 1081 } 1082 1083 @GuardedBy("Segment.this") 1084 ReferenceEntry<K, V> nextExpirable = nullEntry(); 1085 1086 @Override getNextExpirable()1087 public ReferenceEntry<K, V> getNextExpirable() { 1088 return nextExpirable; 1089 } 1090 1091 @Override setNextExpirable(ReferenceEntry<K, V> next)1092 public void setNextExpirable(ReferenceEntry<K, V> next) { 1093 this.nextExpirable = next; 1094 } 1095 1096 @GuardedBy("Segment.this") 1097 ReferenceEntry<K, V> previousExpirable = nullEntry(); 1098 1099 @Override getPreviousExpirable()1100 public ReferenceEntry<K, V> getPreviousExpirable() { 1101 return previousExpirable; 1102 } 1103 1104 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1105 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1106 this.previousExpirable = previous; 1107 } 1108 } 1109 1110 static final class StrongEvictableEntry<K, V> 1111 extends StrongEntry<K, V> implements ReferenceEntry<K, V> { StrongEvictableEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next)1112 StrongEvictableEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1113 super(key, hash, next); 1114 } 1115 1116 // The code below is exactly the same for each evictable entry type. 1117 1118 @GuardedBy("Segment.this") 1119 ReferenceEntry<K, V> nextEvictable = nullEntry(); 1120 1121 @Override getNextEvictable()1122 public ReferenceEntry<K, V> getNextEvictable() { 1123 return nextEvictable; 1124 } 1125 1126 @Override setNextEvictable(ReferenceEntry<K, V> next)1127 public void setNextEvictable(ReferenceEntry<K, V> next) { 1128 this.nextEvictable = next; 1129 } 1130 1131 @GuardedBy("Segment.this") 1132 ReferenceEntry<K, V> previousEvictable = nullEntry(); 1133 1134 @Override getPreviousEvictable()1135 public ReferenceEntry<K, V> getPreviousEvictable() { 1136 return previousEvictable; 1137 } 1138 1139 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1140 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1141 this.previousEvictable = previous; 1142 } 1143 } 1144 1145 static final class StrongExpirableEvictableEntry<K, V> 1146 extends StrongEntry<K, V> implements ReferenceEntry<K, V> { StrongExpirableEvictableEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next)1147 StrongExpirableEvictableEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1148 super(key, hash, next); 1149 } 1150 1151 // The code below is exactly the same for each expirable entry type. 
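    // An entry that is both expirable and evictable participates in two independent doubly-linked
    // lists per segment: the expiration list and the LRU recency list used for size-based
    // eviction, so it carries both sets of links.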
1152 1153 volatile long time = Long.MAX_VALUE; 1154 1155 @Override getExpirationTime()1156 public long getExpirationTime() { 1157 return time; 1158 } 1159 1160 @Override setExpirationTime(long time)1161 public void setExpirationTime(long time) { 1162 this.time = time; 1163 } 1164 1165 @GuardedBy("Segment.this") 1166 ReferenceEntry<K, V> nextExpirable = nullEntry(); 1167 1168 @Override getNextExpirable()1169 public ReferenceEntry<K, V> getNextExpirable() { 1170 return nextExpirable; 1171 } 1172 1173 @Override setNextExpirable(ReferenceEntry<K, V> next)1174 public void setNextExpirable(ReferenceEntry<K, V> next) { 1175 this.nextExpirable = next; 1176 } 1177 1178 @GuardedBy("Segment.this") 1179 ReferenceEntry<K, V> previousExpirable = nullEntry(); 1180 1181 @Override getPreviousExpirable()1182 public ReferenceEntry<K, V> getPreviousExpirable() { 1183 return previousExpirable; 1184 } 1185 1186 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1187 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1188 this.previousExpirable = previous; 1189 } 1190 1191 // The code below is exactly the same for each evictable entry type. 1192 1193 @GuardedBy("Segment.this") 1194 ReferenceEntry<K, V> nextEvictable = nullEntry(); 1195 1196 @Override getNextEvictable()1197 public ReferenceEntry<K, V> getNextEvictable() { 1198 return nextEvictable; 1199 } 1200 1201 @Override setNextEvictable(ReferenceEntry<K, V> next)1202 public void setNextEvictable(ReferenceEntry<K, V> next) { 1203 this.nextEvictable = next; 1204 } 1205 1206 @GuardedBy("Segment.this") 1207 ReferenceEntry<K, V> previousEvictable = nullEntry(); 1208 1209 @Override getPreviousEvictable()1210 public ReferenceEntry<K, V> getPreviousEvictable() { 1211 return previousEvictable; 1212 } 1213 1214 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1215 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1216 this.previousEvictable = previous; 1217 } 1218 } 1219 1220 /** 1221 * Used for softly-referenced keys. 
1222 */ 1223 static class SoftEntry<K, V> extends SoftReference<K> implements ReferenceEntry<K, V> { SoftEntry(ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next)1224 SoftEntry(ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1225 super(key, queue); 1226 this.hash = hash; 1227 this.next = next; 1228 } 1229 1230 @Override getKey()1231 public K getKey() { 1232 return get(); 1233 } 1234 1235 // null expiration 1236 @Override getExpirationTime()1237 public long getExpirationTime() { 1238 throw new UnsupportedOperationException(); 1239 } 1240 1241 @Override setExpirationTime(long time)1242 public void setExpirationTime(long time) { 1243 throw new UnsupportedOperationException(); 1244 } 1245 1246 @Override getNextExpirable()1247 public ReferenceEntry<K, V> getNextExpirable() { 1248 throw new UnsupportedOperationException(); 1249 } 1250 1251 @Override setNextExpirable(ReferenceEntry<K, V> next)1252 public void setNextExpirable(ReferenceEntry<K, V> next) { 1253 throw new UnsupportedOperationException(); 1254 } 1255 1256 @Override getPreviousExpirable()1257 public ReferenceEntry<K, V> getPreviousExpirable() { 1258 throw new UnsupportedOperationException(); 1259 } 1260 1261 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1262 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1263 throw new UnsupportedOperationException(); 1264 } 1265 1266 // null eviction 1267 1268 @Override getNextEvictable()1269 public ReferenceEntry<K, V> getNextEvictable() { 1270 throw new UnsupportedOperationException(); 1271 } 1272 1273 @Override setNextEvictable(ReferenceEntry<K, V> next)1274 public void setNextEvictable(ReferenceEntry<K, V> next) { 1275 throw new UnsupportedOperationException(); 1276 } 1277 1278 @Override getPreviousEvictable()1279 public ReferenceEntry<K, V> getPreviousEvictable() { 1280 throw new UnsupportedOperationException(); 1281 } 1282 1283 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1284 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1285 throw new UnsupportedOperationException(); 1286 } 1287 1288 // The code below is exactly the same for each entry type. 1289 1290 final int hash; 1291 final ReferenceEntry<K, V> next; 1292 volatile ValueReference<K, V> valueReference = unset(); 1293 1294 @Override getValueReference()1295 public ValueReference<K, V> getValueReference() { 1296 return valueReference; 1297 } 1298 1299 @Override setValueReference(ValueReference<K, V> valueReference)1300 public void setValueReference(ValueReference<K, V> valueReference) { 1301 ValueReference<K, V> previous = this.valueReference; 1302 this.valueReference = valueReference; 1303 previous.clear(valueReference); 1304 } 1305 1306 @Override getHash()1307 public int getHash() { 1308 return hash; 1309 } 1310 1311 @Override getNext()1312 public ReferenceEntry<K, V> getNext() { 1313 return next; 1314 } 1315 } 1316 1317 static final class SoftExpirableEntry<K, V> 1318 extends SoftEntry<K, V> implements ReferenceEntry<K, V> { SoftExpirableEntry( ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next)1319 SoftExpirableEntry( 1320 ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1321 super(queue, key, hash, next); 1322 } 1323 1324 // The code below is exactly the same for each expirable entry type. 
1325 1326 volatile long time = Long.MAX_VALUE; 1327 1328 @Override getExpirationTime()1329 public long getExpirationTime() { 1330 return time; 1331 } 1332 1333 @Override setExpirationTime(long time)1334 public void setExpirationTime(long time) { 1335 this.time = time; 1336 } 1337 1338 @GuardedBy("Segment.this") 1339 ReferenceEntry<K, V> nextExpirable = nullEntry(); 1340 1341 @Override getNextExpirable()1342 public ReferenceEntry<K, V> getNextExpirable() { 1343 return nextExpirable; 1344 } 1345 1346 @Override setNextExpirable(ReferenceEntry<K, V> next)1347 public void setNextExpirable(ReferenceEntry<K, V> next) { 1348 this.nextExpirable = next; 1349 } 1350 1351 @GuardedBy("Segment.this") 1352 ReferenceEntry<K, V> previousExpirable = nullEntry(); 1353 1354 @Override getPreviousExpirable()1355 public ReferenceEntry<K, V> getPreviousExpirable() { 1356 return previousExpirable; 1357 } 1358 1359 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1360 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1361 this.previousExpirable = previous; 1362 } 1363 } 1364 1365 static final class SoftEvictableEntry<K, V> 1366 extends SoftEntry<K, V> implements ReferenceEntry<K, V> { SoftEvictableEntry( ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next)1367 SoftEvictableEntry( 1368 ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1369 super(queue, key, hash, next); 1370 } 1371 1372 // The code below is exactly the same for each evictable entry type. 1373 1374 @GuardedBy("Segment.this") 1375 ReferenceEntry<K, V> nextEvictable = nullEntry(); 1376 1377 @Override getNextEvictable()1378 public ReferenceEntry<K, V> getNextEvictable() { 1379 return nextEvictable; 1380 } 1381 1382 @Override setNextEvictable(ReferenceEntry<K, V> next)1383 public void setNextEvictable(ReferenceEntry<K, V> next) { 1384 this.nextEvictable = next; 1385 } 1386 1387 @GuardedBy("Segment.this") 1388 ReferenceEntry<K, V> previousEvictable = nullEntry(); 1389 1390 @Override getPreviousEvictable()1391 public ReferenceEntry<K, V> getPreviousEvictable() { 1392 return previousEvictable; 1393 } 1394 1395 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1396 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1397 this.previousEvictable = previous; 1398 } 1399 } 1400 1401 static final class SoftExpirableEvictableEntry<K, V> 1402 extends SoftEntry<K, V> implements ReferenceEntry<K, V> { SoftExpirableEvictableEntry( ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next)1403 SoftExpirableEvictableEntry( 1404 ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1405 super(queue, key, hash, next); 1406 } 1407 1408 // The code below is exactly the same for each expirable entry type. 
1409 1410 volatile long time = Long.MAX_VALUE; 1411 1412 @Override getExpirationTime()1413 public long getExpirationTime() { 1414 return time; 1415 } 1416 1417 @Override setExpirationTime(long time)1418 public void setExpirationTime(long time) { 1419 this.time = time; 1420 } 1421 1422 @GuardedBy("Segment.this") 1423 ReferenceEntry<K, V> nextExpirable = nullEntry(); 1424 1425 @Override getNextExpirable()1426 public ReferenceEntry<K, V> getNextExpirable() { 1427 return nextExpirable; 1428 } 1429 1430 @Override setNextExpirable(ReferenceEntry<K, V> next)1431 public void setNextExpirable(ReferenceEntry<K, V> next) { 1432 this.nextExpirable = next; 1433 } 1434 1435 @GuardedBy("Segment.this") 1436 ReferenceEntry<K, V> previousExpirable = nullEntry(); 1437 1438 @Override getPreviousExpirable()1439 public ReferenceEntry<K, V> getPreviousExpirable() { 1440 return previousExpirable; 1441 } 1442 1443 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1444 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1445 this.previousExpirable = previous; 1446 } 1447 1448 // The code below is exactly the same for each evictable entry type. 1449 1450 @GuardedBy("Segment.this") 1451 ReferenceEntry<K, V> nextEvictable = nullEntry(); 1452 1453 @Override getNextEvictable()1454 public ReferenceEntry<K, V> getNextEvictable() { 1455 return nextEvictable; 1456 } 1457 1458 @Override setNextEvictable(ReferenceEntry<K, V> next)1459 public void setNextEvictable(ReferenceEntry<K, V> next) { 1460 this.nextEvictable = next; 1461 } 1462 1463 @GuardedBy("Segment.this") 1464 ReferenceEntry<K, V> previousEvictable = nullEntry(); 1465 1466 @Override getPreviousEvictable()1467 public ReferenceEntry<K, V> getPreviousEvictable() { 1468 return previousEvictable; 1469 } 1470 1471 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1472 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1473 this.previousEvictable = previous; 1474 } 1475 } 1476 1477 /** 1478 * Used for weakly-referenced keys. 
1479 */ 1480 static class WeakEntry<K, V> extends WeakReference<K> implements ReferenceEntry<K, V> { WeakEntry(ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next)1481 WeakEntry(ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1482 super(key, queue); 1483 this.hash = hash; 1484 this.next = next; 1485 } 1486 1487 @Override getKey()1488 public K getKey() { 1489 return get(); 1490 } 1491 1492 // null expiration 1493 1494 @Override getExpirationTime()1495 public long getExpirationTime() { 1496 throw new UnsupportedOperationException(); 1497 } 1498 1499 @Override setExpirationTime(long time)1500 public void setExpirationTime(long time) { 1501 throw new UnsupportedOperationException(); 1502 } 1503 1504 @Override getNextExpirable()1505 public ReferenceEntry<K, V> getNextExpirable() { 1506 throw new UnsupportedOperationException(); 1507 } 1508 1509 @Override setNextExpirable(ReferenceEntry<K, V> next)1510 public void setNextExpirable(ReferenceEntry<K, V> next) { 1511 throw new UnsupportedOperationException(); 1512 } 1513 1514 @Override getPreviousExpirable()1515 public ReferenceEntry<K, V> getPreviousExpirable() { 1516 throw new UnsupportedOperationException(); 1517 } 1518 1519 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1520 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1521 throw new UnsupportedOperationException(); 1522 } 1523 1524 // null eviction 1525 1526 @Override getNextEvictable()1527 public ReferenceEntry<K, V> getNextEvictable() { 1528 throw new UnsupportedOperationException(); 1529 } 1530 1531 @Override setNextEvictable(ReferenceEntry<K, V> next)1532 public void setNextEvictable(ReferenceEntry<K, V> next) { 1533 throw new UnsupportedOperationException(); 1534 } 1535 1536 @Override getPreviousEvictable()1537 public ReferenceEntry<K, V> getPreviousEvictable() { 1538 throw new UnsupportedOperationException(); 1539 } 1540 1541 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1542 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1543 throw new UnsupportedOperationException(); 1544 } 1545 1546 // The code below is exactly the same for each entry type. 1547 1548 final int hash; 1549 final ReferenceEntry<K, V> next; 1550 volatile ValueReference<K, V> valueReference = unset(); 1551 1552 @Override getValueReference()1553 public ValueReference<K, V> getValueReference() { 1554 return valueReference; 1555 } 1556 1557 @Override setValueReference(ValueReference<K, V> valueReference)1558 public void setValueReference(ValueReference<K, V> valueReference) { 1559 ValueReference<K, V> previous = this.valueReference; 1560 this.valueReference = valueReference; 1561 previous.clear(valueReference); 1562 } 1563 1564 @Override getHash()1565 public int getHash() { 1566 return hash; 1567 } 1568 1569 @Override getNext()1570 public ReferenceEntry<K, V> getNext() { 1571 return next; 1572 } 1573 } 1574 1575 static final class WeakExpirableEntry<K, V> 1576 extends WeakEntry<K, V> implements ReferenceEntry<K, V> { WeakExpirableEntry( ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next)1577 WeakExpirableEntry( 1578 ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1579 super(queue, key, hash, next); 1580 } 1581 1582 // The code below is exactly the same for each expirable entry type. 
1583 1584 volatile long time = Long.MAX_VALUE; 1585 1586 @Override getExpirationTime()1587 public long getExpirationTime() { 1588 return time; 1589 } 1590 1591 @Override setExpirationTime(long time)1592 public void setExpirationTime(long time) { 1593 this.time = time; 1594 } 1595 1596 @GuardedBy("Segment.this") 1597 ReferenceEntry<K, V> nextExpirable = nullEntry(); 1598 1599 @Override getNextExpirable()1600 public ReferenceEntry<K, V> getNextExpirable() { 1601 return nextExpirable; 1602 } 1603 1604 @Override setNextExpirable(ReferenceEntry<K, V> next)1605 public void setNextExpirable(ReferenceEntry<K, V> next) { 1606 this.nextExpirable = next; 1607 } 1608 1609 @GuardedBy("Segment.this") 1610 ReferenceEntry<K, V> previousExpirable = nullEntry(); 1611 1612 @Override getPreviousExpirable()1613 public ReferenceEntry<K, V> getPreviousExpirable() { 1614 return previousExpirable; 1615 } 1616 1617 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1618 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1619 this.previousExpirable = previous; 1620 } 1621 } 1622 1623 static final class WeakEvictableEntry<K, V> 1624 extends WeakEntry<K, V> implements ReferenceEntry<K, V> { WeakEvictableEntry( ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next)1625 WeakEvictableEntry( 1626 ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1627 super(queue, key, hash, next); 1628 } 1629 1630 // The code below is exactly the same for each evictable entry type. 1631 1632 @GuardedBy("Segment.this") 1633 ReferenceEntry<K, V> nextEvictable = nullEntry(); 1634 1635 @Override getNextEvictable()1636 public ReferenceEntry<K, V> getNextEvictable() { 1637 return nextEvictable; 1638 } 1639 1640 @Override setNextEvictable(ReferenceEntry<K, V> next)1641 public void setNextEvictable(ReferenceEntry<K, V> next) { 1642 this.nextEvictable = next; 1643 } 1644 1645 @GuardedBy("Segment.this") 1646 ReferenceEntry<K, V> previousEvictable = nullEntry(); 1647 1648 @Override getPreviousEvictable()1649 public ReferenceEntry<K, V> getPreviousEvictable() { 1650 return previousEvictable; 1651 } 1652 1653 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1654 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1655 this.previousEvictable = previous; 1656 } 1657 } 1658 1659 static final class WeakExpirableEvictableEntry<K, V> 1660 extends WeakEntry<K, V> implements ReferenceEntry<K, V> { WeakExpirableEvictableEntry( ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next)1661 WeakExpirableEvictableEntry( 1662 ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1663 super(queue, key, hash, next); 1664 } 1665 1666 // The code below is exactly the same for each expirable entry type. 
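    // Note that these list links are ordinary strong references between entries. Clearing of the
    // weak key (WeakEntry extends WeakReference<K>) does not unlink the entry by itself; the
    // owning segment is expected to unlink it when it drains its key reference queue during
    // cleanup.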
1667 1668 volatile long time = Long.MAX_VALUE; 1669 1670 @Override getExpirationTime()1671 public long getExpirationTime() { 1672 return time; 1673 } 1674 1675 @Override setExpirationTime(long time)1676 public void setExpirationTime(long time) { 1677 this.time = time; 1678 } 1679 1680 @GuardedBy("Segment.this") 1681 ReferenceEntry<K, V> nextExpirable = nullEntry(); 1682 1683 @Override getNextExpirable()1684 public ReferenceEntry<K, V> getNextExpirable() { 1685 return nextExpirable; 1686 } 1687 1688 @Override setNextExpirable(ReferenceEntry<K, V> next)1689 public void setNextExpirable(ReferenceEntry<K, V> next) { 1690 this.nextExpirable = next; 1691 } 1692 1693 @GuardedBy("Segment.this") 1694 ReferenceEntry<K, V> previousExpirable = nullEntry(); 1695 1696 @Override getPreviousExpirable()1697 public ReferenceEntry<K, V> getPreviousExpirable() { 1698 return previousExpirable; 1699 } 1700 1701 @Override setPreviousExpirable(ReferenceEntry<K, V> previous)1702 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 1703 this.previousExpirable = previous; 1704 } 1705 1706 // The code below is exactly the same for each evictable entry type. 1707 1708 @GuardedBy("Segment.this") 1709 ReferenceEntry<K, V> nextEvictable = nullEntry(); 1710 1711 @Override getNextEvictable()1712 public ReferenceEntry<K, V> getNextEvictable() { 1713 return nextEvictable; 1714 } 1715 1716 @Override setNextEvictable(ReferenceEntry<K, V> next)1717 public void setNextEvictable(ReferenceEntry<K, V> next) { 1718 this.nextEvictable = next; 1719 } 1720 1721 @GuardedBy("Segment.this") 1722 ReferenceEntry<K, V> previousEvictable = nullEntry(); 1723 1724 @Override getPreviousEvictable()1725 public ReferenceEntry<K, V> getPreviousEvictable() { 1726 return previousEvictable; 1727 } 1728 1729 @Override setPreviousEvictable(ReferenceEntry<K, V> previous)1730 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 1731 this.previousEvictable = previous; 1732 } 1733 } 1734 1735 /** 1736 * References a weak value. 1737 */ 1738 static final class WeakValueReference<K, V> 1739 extends WeakReference<V> implements ValueReference<K, V> { 1740 final ReferenceEntry<K, V> entry; 1741 WeakValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry)1742 WeakValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry) { 1743 super(referent, queue); 1744 this.entry = entry; 1745 } 1746 1747 @Override getEntry()1748 public ReferenceEntry<K, V> getEntry() { 1749 return entry; 1750 } 1751 1752 @Override clear(ValueReference<K, V> newValue)1753 public void clear(ValueReference<K, V> newValue) { 1754 clear(); 1755 } 1756 1757 @Override copyFor( ReferenceQueue<V> queue, ReferenceEntry<K, V> entry)1758 public ValueReference<K, V> copyFor( 1759 ReferenceQueue<V> queue, ReferenceEntry<K, V> entry) { 1760 return new WeakValueReference<K, V>(queue, get(), entry); 1761 } 1762 1763 @Override isComputingReference()1764 public boolean isComputingReference() { 1765 return false; 1766 } 1767 1768 @Override waitForValue()1769 public V waitForValue() { 1770 return get(); 1771 } 1772 } 1773 1774 /** 1775 * References a soft value. 
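   *
   * <p>Unlike a weakly referenced value, a softly referenced value is reclaimed only when the VM
   * is running low on memory, so soft values behave like memory-sensitive cache entries rather
   * than values that disappear at the first collection in which they are no longer strongly
   * reachable.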
1776 */ 1777 static final class SoftValueReference<K, V> 1778 extends SoftReference<V> implements ValueReference<K, V> { 1779 final ReferenceEntry<K, V> entry; 1780 SoftValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry)1781 SoftValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry) { 1782 super(referent, queue); 1783 this.entry = entry; 1784 } 1785 1786 @Override getEntry()1787 public ReferenceEntry<K, V> getEntry() { 1788 return entry; 1789 } 1790 1791 @Override clear(ValueReference<K, V> newValue)1792 public void clear(ValueReference<K, V> newValue) { 1793 clear(); 1794 } 1795 1796 @Override copyFor(ReferenceQueue<V> queue, ReferenceEntry<K, V> entry)1797 public ValueReference<K, V> copyFor(ReferenceQueue<V> queue, ReferenceEntry<K, V> entry) { 1798 return new SoftValueReference<K, V>(queue, get(), entry); 1799 } 1800 1801 @Override isComputingReference()1802 public boolean isComputingReference() { 1803 return false; 1804 } 1805 1806 @Override waitForValue()1807 public V waitForValue() { 1808 return get(); 1809 } 1810 } 1811 1812 /** 1813 * References a strong value. 1814 */ 1815 static final class StrongValueReference<K, V> implements ValueReference<K, V> { 1816 final V referent; 1817 StrongValueReference(V referent)1818 StrongValueReference(V referent) { 1819 this.referent = referent; 1820 } 1821 1822 @Override get()1823 public V get() { 1824 return referent; 1825 } 1826 1827 @Override getEntry()1828 public ReferenceEntry<K, V> getEntry() { 1829 return null; 1830 } 1831 1832 @Override copyFor(ReferenceQueue<V> queue, ReferenceEntry<K, V> entry)1833 public ValueReference<K, V> copyFor(ReferenceQueue<V> queue, ReferenceEntry<K, V> entry) { 1834 return this; 1835 } 1836 1837 @Override isComputingReference()1838 public boolean isComputingReference() { 1839 return false; 1840 } 1841 1842 @Override waitForValue()1843 public V waitForValue() { 1844 return get(); 1845 } 1846 1847 @Override clear(ValueReference<K, V> newValue)1848 public void clear(ValueReference<K, V> newValue) {} 1849 } 1850 1851 /** 1852 * Applies a supplemental hash function to a given hash code, which defends against poor quality 1853 * hash functions. This is critical when the concurrent hash map uses power-of-two length hash 1854 * tables, that otherwise encounter collisions for hash codes that do not differ in lower or 1855 * upper bits. 1856 * 1857 * @param h hash code 1858 */ rehash(int h)1859 static int rehash(int h) { 1860 // Spread bits to regularize both segment and index locations, 1861 // using variant of single-word Wang/Jenkins hash. 1862 // TODO(kevinb): use Hashing/move this to Hashing? 1863 h += (h << 15) ^ 0xffffcd7d; 1864 h ^= (h >>> 10); 1865 h += (h << 3); 1866 h ^= (h >>> 6); 1867 h += (h << 2) + (h << 14); 1868 return h ^ (h >>> 16); 1869 } 1870 1871 /** 1872 * This method is a convenience for testing. Code should call {@link Segment#newEntry} directly. 1873 */ 1874 @GuardedBy("Segment.this") 1875 @VisibleForTesting newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next)1876 ReferenceEntry<K, V> newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 1877 return segmentFor(hash).newEntry(key, hash, next); 1878 } 1879 1880 /** 1881 * This method is a convenience for testing. Code should call {@link Segment#copyEntry} directly. 
1882 */ 1883 @GuardedBy("Segment.this") 1884 @VisibleForTesting copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext)1885 ReferenceEntry<K, V> copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 1886 int hash = original.getHash(); 1887 return segmentFor(hash).copyEntry(original, newNext); 1888 } 1889 1890 /** 1891 * This method is a convenience for testing. Code should call {@link Segment#setValue} instead. 1892 */ 1893 @GuardedBy("Segment.this") 1894 @VisibleForTesting newValueReference(ReferenceEntry<K, V> entry, V value)1895 ValueReference<K, V> newValueReference(ReferenceEntry<K, V> entry, V value) { 1896 int hash = entry.getHash(); 1897 return valueStrength.referenceValue(segmentFor(hash), entry, value); 1898 } 1899 hash(Object key)1900 int hash(Object key) { 1901 int h = keyEquivalence.hash(key); 1902 return rehash(h); 1903 } 1904 reclaimValue(ValueReference<K, V> valueReference)1905 void reclaimValue(ValueReference<K, V> valueReference) { 1906 ReferenceEntry<K, V> entry = valueReference.getEntry(); 1907 int hash = entry.getHash(); 1908 segmentFor(hash).reclaimValue(entry.getKey(), hash, valueReference); 1909 } 1910 reclaimKey(ReferenceEntry<K, V> entry)1911 void reclaimKey(ReferenceEntry<K, V> entry) { 1912 int hash = entry.getHash(); 1913 segmentFor(hash).reclaimKey(entry, hash); 1914 } 1915 1916 /** 1917 * This method is a convenience for testing. Code should call {@link Segment#getLiveValue} 1918 * instead. 1919 */ 1920 @VisibleForTesting isLive(ReferenceEntry<K, V> entry)1921 boolean isLive(ReferenceEntry<K, V> entry) { 1922 return segmentFor(entry.getHash()).getLiveValue(entry) != null; 1923 } 1924 1925 /** 1926 * Returns the segment that should be used for a key with the given hash. 1927 * 1928 * @param hash the hash code for the key 1929 * @return the segment 1930 */ segmentFor(int hash)1931 Segment<K, V> segmentFor(int hash) { 1932 // TODO(fry): Lazily create segments? 1933 return segments[(hash >>> segmentShift) & segmentMask]; 1934 } 1935 createSegment(int initialCapacity, int maxSegmentSize)1936 Segment<K, V> createSegment(int initialCapacity, int maxSegmentSize) { 1937 return new Segment<K, V>(this, initialCapacity, maxSegmentSize); 1938 } 1939 1940 /** 1941 * Gets the value from an entry. Returns {@code null} if the entry is invalid, 1942 * partially-collected, computing, or expired. Unlike {@link Segment#getLiveValue} this method 1943 * does not attempt to clean up stale entries. 1944 */ getLiveValue(ReferenceEntry<K, V> entry)1945 V getLiveValue(ReferenceEntry<K, V> entry) { 1946 if (entry.getKey() == null) { 1947 return null; 1948 } 1949 V value = entry.getValueReference().get(); 1950 if (value == null) { 1951 return null; 1952 } 1953 1954 if (expires() && isExpired(entry)) { 1955 return null; 1956 } 1957 return value; 1958 } 1959 1960 // expiration 1961 1962 /** 1963 * Returns {@code true} if the entry has expired. 1964 */ isExpired(ReferenceEntry<K, V> entry)1965 boolean isExpired(ReferenceEntry<K, V> entry) { 1966 return isExpired(entry, ticker.read()); 1967 } 1968 1969 /** 1970 * Returns {@code true} if the entry has expired. 
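   *
   * <p>The comparison is written as {@code now - entry.getExpirationTime() > 0} rather than
   * {@code now > entry.getExpirationTime()} so that it remains correct even when recording the
   * expiration time overflowed {@code Long.MAX_VALUE}; only the sign of the wrapped-around
   * difference matters, the same idiom used for comparing {@link System#nanoTime} readings.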
   */
  boolean isExpired(ReferenceEntry<K, V> entry, long now) {
    // if the expiration time had overflowed, this "undoes" the overflow
    return now - entry.getExpirationTime() > 0;
  }

  @GuardedBy("Segment.this")
  static <K, V> void connectExpirables(ReferenceEntry<K, V> previous, ReferenceEntry<K, V> next) {
    previous.setNextExpirable(next);
    next.setPreviousExpirable(previous);
  }

  @GuardedBy("Segment.this")
  static <K, V> void nullifyExpirable(ReferenceEntry<K, V> nulled) {
    ReferenceEntry<K, V> nullEntry = nullEntry();
    nulled.setNextExpirable(nullEntry);
    nulled.setPreviousExpirable(nullEntry);
  }

  // eviction

  /**
   * Notifies listeners that an entry has been automatically removed due to expiration, eviction,
   * or eligibility for garbage collection. This should be called every time expireEntries or
   * evictEntry is called (once the lock is released).
   */
  void processPendingNotifications() {
    RemovalNotification<K, V> notification;
    while ((notification = removalNotificationQueue.poll()) != null) {
      try {
        removalListener.onRemoval(notification);
      } catch (Exception e) {
        logger.log(Level.WARNING, "Exception thrown by removal listener", e);
      }
    }
  }

  /** Links the evictables together. */
  @GuardedBy("Segment.this")
  static <K, V> void connectEvictables(ReferenceEntry<K, V> previous, ReferenceEntry<K, V> next) {
    previous.setNextEvictable(next);
    next.setPreviousEvictable(previous);
  }

  @GuardedBy("Segment.this")
  static <K, V> void nullifyEvictable(ReferenceEntry<K, V> nulled) {
    ReferenceEntry<K, V> nullEntry = nullEntry();
    nulled.setNextEvictable(nullEntry);
    nulled.setPreviousEvictable(nullEntry);
  }

  @SuppressWarnings("unchecked")
  final Segment<K, V>[] newSegmentArray(int ssize) {
    return new Segment[ssize];
  }

  // Inner Classes

  /**
   * Segments are specialized versions of hash tables. This subclass inherits from ReentrantLock
   * opportunistically, just to simplify some locking and avoid separate construction.
   */
  @SuppressWarnings("serial") // This class is never serialized.
  static class Segment<K, V> extends ReentrantLock {

    /*
     * TODO(fry): Consider copying variables (like evictsBySize) from outer class into this class.
     * It will require more memory but will reduce indirection.
     */

    /*
     * Segments maintain a table of entry lists that are ALWAYS kept in a consistent state, so can
     * be read without locking. Next fields of nodes are immutable (final). All list additions are
     * performed at the front of each bin. This makes it easy to check changes, and also fast to
     * traverse. When nodes would otherwise be changed, new nodes are created to replace them. This
     * works well for hash tables since the bin lists tend to be short. (The average length is less
     * than two.)
2048 * 2049 * Read operations can thus proceed without locking, but rely on selected uses of volatiles to 2050 * ensure that completed write operations performed by other threads are noticed. For most 2051 * purposes, the "count" field, tracking the number of elements, serves as that volatile 2052 * variable ensuring visibility. This is convenient because this field needs to be read in many 2053 * read operations anyway: 2054 * 2055 * - All (unsynchronized) read operations must first read the "count" field, and should not 2056 * look at table entries if it is 0. 2057 * 2058 * - All (synchronized) write operations should write to the "count" field after structurally 2059 * changing any bin. The operations must not take any action that could even momentarily 2060 * cause a concurrent read operation to see inconsistent data. This is made easier by the 2061 * nature of the read operations in Map. For example, no operation can reveal that the table 2062 * has grown but the threshold has not yet been updated, so there are no atomicity requirements 2063 * for this with respect to reads. 2064 * 2065 * As a guide, all critical volatile reads and writes to the count field are marked in code 2066 * comments. 2067 */ 2068 2069 final MapMakerInternalMap<K, V> map; 2070 2071 /** 2072 * The number of live elements in this segment's region. This does not include unset elements 2073 * which are awaiting cleanup. 2074 */ 2075 volatile int count; 2076 2077 /** 2078 * Number of updates that alter the size of the table. This is used during bulk-read methods to 2079 * make sure they see a consistent snapshot: If modCounts change during a traversal of segments 2080 * computing size or checking containsValue, then we might have an inconsistent view of state 2081 * so (usually) must retry. 2082 */ 2083 int modCount; 2084 2085 /** 2086 * The table is expanded when its size exceeds this threshold. (The value of this field is 2087 * always {@code (int)(capacity * 0.75)}.) 2088 */ 2089 int threshold; 2090 2091 /** 2092 * The per-segment table. 2093 */ 2094 volatile AtomicReferenceArray<ReferenceEntry<K, V>> table; 2095 2096 /** 2097 * The maximum size of this map. MapMaker.UNSET_INT if there is no maximum. 2098 */ 2099 final int maxSegmentSize; 2100 2101 /** 2102 * The key reference queue contains entries whose keys have been garbage collected, and which 2103 * need to be cleaned up internally. 2104 */ 2105 final ReferenceQueue<K> keyReferenceQueue; 2106 2107 /** 2108 * The value reference queue contains value references whose values have been garbage collected, 2109 * and which need to be cleaned up internally. 2110 */ 2111 final ReferenceQueue<V> valueReferenceQueue; 2112 2113 /** 2114 * The recency queue is used to record which entries were accessed for updating the eviction 2115 * list's ordering. It is drained as a batch operation when either the DRAIN_THRESHOLD is 2116 * crossed or a write occurs on the segment. 2117 */ 2118 final Queue<ReferenceEntry<K, V>> recencyQueue; 2119 2120 /** 2121 * A counter of the number of reads since the last write, used to drain queues on a small 2122 * fraction of read operations. 2123 */ 2124 final AtomicInteger readCount = new AtomicInteger(); 2125 2126 /** 2127 * A queue of elements currently in the map, ordered by access time. Elements are added to the 2128 * tail of the queue on access/write. 
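     *
     * <p>Because an entry is re-added to the tail on every access or write, the head of this
     * queue is always the least-recently-used live entry; it is the first entry removed by
     * {@code evictEntries()} once the segment grows past its maximum size.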
2129 */ 2130 @GuardedBy("Segment.this") 2131 final Queue<ReferenceEntry<K, V>> evictionQueue; 2132 2133 /** 2134 * A queue of elements currently in the map, ordered by expiration time (either access or write 2135 * time). Elements are added to the tail of the queue on access/write. 2136 */ 2137 @GuardedBy("Segment.this") 2138 final Queue<ReferenceEntry<K, V>> expirationQueue; 2139 Segment(MapMakerInternalMap<K, V> map, int initialCapacity, int maxSegmentSize)2140 Segment(MapMakerInternalMap<K, V> map, int initialCapacity, int maxSegmentSize) { 2141 this.map = map; 2142 this.maxSegmentSize = maxSegmentSize; 2143 initTable(newEntryArray(initialCapacity)); 2144 2145 keyReferenceQueue = map.usesKeyReferences() 2146 ? new ReferenceQueue<K>() : null; 2147 2148 valueReferenceQueue = map.usesValueReferences() 2149 ? new ReferenceQueue<V>() : null; 2150 2151 recencyQueue = (map.evictsBySize() || map.expiresAfterAccess()) 2152 ? new ConcurrentLinkedQueue<ReferenceEntry<K, V>>() 2153 : MapMakerInternalMap.<ReferenceEntry<K, V>>discardingQueue(); 2154 2155 evictionQueue = map.evictsBySize() 2156 ? new EvictionQueue<K, V>() 2157 : MapMakerInternalMap.<ReferenceEntry<K, V>>discardingQueue(); 2158 2159 expirationQueue = map.expires() 2160 ? new ExpirationQueue<K, V>() 2161 : MapMakerInternalMap.<ReferenceEntry<K, V>>discardingQueue(); 2162 } 2163 newEntryArray(int size)2164 AtomicReferenceArray<ReferenceEntry<K, V>> newEntryArray(int size) { 2165 return new AtomicReferenceArray<ReferenceEntry<K, V>>(size); 2166 } 2167 initTable(AtomicReferenceArray<ReferenceEntry<K, V>> newTable)2168 void initTable(AtomicReferenceArray<ReferenceEntry<K, V>> newTable) { 2169 this.threshold = newTable.length() * 3 / 4; // 0.75 2170 if (this.threshold == maxSegmentSize) { 2171 // prevent spurious expansion before eviction 2172 this.threshold++; 2173 } 2174 this.table = newTable; 2175 } 2176 2177 @GuardedBy("Segment.this") newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next)2178 ReferenceEntry<K, V> newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) { 2179 return map.entryFactory.newEntry(this, key, hash, next); 2180 } 2181 2182 @GuardedBy("Segment.this") copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext)2183 ReferenceEntry<K, V> copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 2184 ValueReference<K, V> valueReference = original.getValueReference(); 2185 ReferenceEntry<K, V> newEntry = map.entryFactory.copyEntry(this, original, newNext); 2186 newEntry.setValueReference(valueReference.copyFor(this.valueReferenceQueue, newEntry)); 2187 return newEntry; 2188 } 2189 2190 /** 2191 * Sets a new value of an entry. Adds newly created entries at the end of the expiration queue. 2192 */ 2193 @GuardedBy("Segment.this") setValue(ReferenceEntry<K, V> entry, V value)2194 void setValue(ReferenceEntry<K, V> entry, V value) { 2195 ValueReference<K, V> valueReference = map.valueStrength.referenceValue(this, entry, value); 2196 entry.setValueReference(valueReference); 2197 recordWrite(entry); 2198 } 2199 2200 // reference queues, for garbage collection cleanup 2201 2202 /** 2203 * Cleanup collected entries when the lock is available. 2204 */ tryDrainReferenceQueues()2205 void tryDrainReferenceQueues() { 2206 if (tryLock()) { 2207 try { 2208 drainReferenceQueues(); 2209 } finally { 2210 unlock(); 2211 } 2212 } 2213 } 2214 2215 /** 2216 * Drain the key and value reference queues, cleaning up internal entries containing garbage 2217 * collected keys or values. 
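     *
     * <p>Each queue is drained at most {@code DRAIN_MAX} references at a time, so a single
     * cleanup pass stays short even after a large burst of garbage collection; any remaining
     * references are simply picked up by a later pass.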
2218 */ 2219 @GuardedBy("Segment.this") drainReferenceQueues()2220 void drainReferenceQueues() { 2221 if (map.usesKeyReferences()) { 2222 drainKeyReferenceQueue(); 2223 } 2224 if (map.usesValueReferences()) { 2225 drainValueReferenceQueue(); 2226 } 2227 } 2228 2229 @GuardedBy("Segment.this") drainKeyReferenceQueue()2230 void drainKeyReferenceQueue() { 2231 Reference<? extends K> ref; 2232 int i = 0; 2233 while ((ref = keyReferenceQueue.poll()) != null) { 2234 @SuppressWarnings("unchecked") 2235 ReferenceEntry<K, V> entry = (ReferenceEntry<K, V>) ref; 2236 map.reclaimKey(entry); 2237 if (++i == DRAIN_MAX) { 2238 break; 2239 } 2240 } 2241 } 2242 2243 @GuardedBy("Segment.this") drainValueReferenceQueue()2244 void drainValueReferenceQueue() { 2245 Reference<? extends V> ref; 2246 int i = 0; 2247 while ((ref = valueReferenceQueue.poll()) != null) { 2248 @SuppressWarnings("unchecked") 2249 ValueReference<K, V> valueReference = (ValueReference<K, V>) ref; 2250 map.reclaimValue(valueReference); 2251 if (++i == DRAIN_MAX) { 2252 break; 2253 } 2254 } 2255 } 2256 2257 /** 2258 * Clears all entries from the key and value reference queues. 2259 */ clearReferenceQueues()2260 void clearReferenceQueues() { 2261 if (map.usesKeyReferences()) { 2262 clearKeyReferenceQueue(); 2263 } 2264 if (map.usesValueReferences()) { 2265 clearValueReferenceQueue(); 2266 } 2267 } 2268 clearKeyReferenceQueue()2269 void clearKeyReferenceQueue() { 2270 while (keyReferenceQueue.poll() != null) {} 2271 } 2272 clearValueReferenceQueue()2273 void clearValueReferenceQueue() { 2274 while (valueReferenceQueue.poll() != null) {} 2275 } 2276 2277 // recency queue, shared by expiration and eviction 2278 2279 /** 2280 * Records the relative order in which this read was performed by adding {@code entry} to the 2281 * recency queue. At write-time, or when the queue is full past the threshold, the queue will 2282 * be drained and the entries therein processed. 2283 * 2284 * <p>Note: locked reads should use {@link #recordLockedRead}. 2285 */ recordRead(ReferenceEntry<K, V> entry)2286 void recordRead(ReferenceEntry<K, V> entry) { 2287 if (map.expiresAfterAccess()) { 2288 recordExpirationTime(entry, map.expireAfterAccessNanos); 2289 } 2290 recencyQueue.add(entry); 2291 } 2292 2293 /** 2294 * Updates the eviction metadata that {@code entry} was just read. This currently amounts to 2295 * adding {@code entry} to relevant eviction lists. 2296 * 2297 * <p>Note: this method should only be called under lock, as it directly manipulates the 2298 * eviction queues. Unlocked reads should use {@link #recordRead}. 2299 */ 2300 @GuardedBy("Segment.this") recordLockedRead(ReferenceEntry<K, V> entry)2301 void recordLockedRead(ReferenceEntry<K, V> entry) { 2302 evictionQueue.add(entry); 2303 if (map.expiresAfterAccess()) { 2304 recordExpirationTime(entry, map.expireAfterAccessNanos); 2305 expirationQueue.add(entry); 2306 } 2307 } 2308 2309 /** 2310 * Updates eviction metadata that {@code entry} was just written. This currently amounts to 2311 * adding {@code entry} to relevant eviction lists. 2312 */ 2313 @GuardedBy("Segment.this") recordWrite(ReferenceEntry<K, V> entry)2314 void recordWrite(ReferenceEntry<K, V> entry) { 2315 // we are already under lock, so drain the recency queue immediately 2316 drainRecencyQueue(); 2317 evictionQueue.add(entry); 2318 if (map.expires()) { 2319 // currently MapMaker ensures that expireAfterWrite and 2320 // expireAfterAccess are mutually exclusive 2321 long expiration = map.expiresAfterAccess() 2322 ? 
map.expireAfterAccessNanos 2323 : map.expireAfterWriteNanos; 2324 recordExpirationTime(entry, expiration); 2325 expirationQueue.add(entry); 2326 } 2327 } 2328 2329 /** 2330 * Drains the recency queue, updating eviction metadata that the entries therein were read in 2331 * the specified relative order. This currently amounts to adding them to relevant eviction 2332 * lists (accounting for the fact that they could have been removed from the map since being 2333 * added to the recency queue). 2334 */ 2335 @GuardedBy("Segment.this") drainRecencyQueue()2336 void drainRecencyQueue() { 2337 ReferenceEntry<K, V> e; 2338 while ((e = recencyQueue.poll()) != null) { 2339 // An entry may be in the recency queue despite it being removed from 2340 // the map . This can occur when the entry was concurrently read while a 2341 // writer is removing it from the segment or after a clear has removed 2342 // all of the segment's entries. 2343 if (evictionQueue.contains(e)) { 2344 evictionQueue.add(e); 2345 } 2346 if (map.expiresAfterAccess() && expirationQueue.contains(e)) { 2347 expirationQueue.add(e); 2348 } 2349 } 2350 } 2351 2352 // expiration 2353 recordExpirationTime(ReferenceEntry<K, V> entry, long expirationNanos)2354 void recordExpirationTime(ReferenceEntry<K, V> entry, long expirationNanos) { 2355 // might overflow, but that's okay (see isExpired()) 2356 entry.setExpirationTime(map.ticker.read() + expirationNanos); 2357 } 2358 2359 /** 2360 * Cleanup expired entries when the lock is available. 2361 */ tryExpireEntries()2362 void tryExpireEntries() { 2363 if (tryLock()) { 2364 try { 2365 expireEntries(); 2366 } finally { 2367 unlock(); 2368 // don't call postWriteCleanup as we're in a read 2369 } 2370 } 2371 } 2372 2373 @GuardedBy("Segment.this") expireEntries()2374 void expireEntries() { 2375 drainRecencyQueue(); 2376 2377 if (expirationQueue.isEmpty()) { 2378 // There's no point in calling nanoTime() if we have no entries to 2379 // expire. 2380 return; 2381 } 2382 long now = map.ticker.read(); 2383 ReferenceEntry<K, V> e; 2384 while ((e = expirationQueue.peek()) != null && map.isExpired(e, now)) { 2385 if (!removeEntry(e, e.getHash(), RemovalCause.EXPIRED)) { 2386 throw new AssertionError(); 2387 } 2388 } 2389 } 2390 2391 // eviction 2392 enqueueNotification(ReferenceEntry<K, V> entry, RemovalCause cause)2393 void enqueueNotification(ReferenceEntry<K, V> entry, RemovalCause cause) { 2394 enqueueNotification(entry.getKey(), entry.getHash(), entry.getValueReference().get(), cause); 2395 } 2396 enqueueNotification(@ullable K key, int hash, @Nullable V value, RemovalCause cause)2397 void enqueueNotification(@Nullable K key, int hash, @Nullable V value, RemovalCause cause) { 2398 if (map.removalNotificationQueue != DISCARDING_QUEUE) { 2399 RemovalNotification<K, V> notification = new RemovalNotification<K, V>(key, value, cause); 2400 map.removalNotificationQueue.offer(notification); 2401 } 2402 } 2403 2404 /** 2405 * Performs eviction if the segment is full. This should only be called prior to adding a new 2406 * entry and increasing {@code count}. 
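     *
     * <p>For example, the unconditional-put path uses it roughly as follows (a condensed sketch
     * of the code in {@code put}):
     * <pre>{@code
     * ReferenceEntry<K, V> newEntry = newEntry(key, hash, first);
     * setValue(newEntry, value);
     * table.set(index, newEntry);
     * if (evictEntries()) { // may have removed the LRU entry to make room
     *   newCount = this.count + 1;
     * }
     * this.count = newCount; // write-volatile
     * }</pre>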
2407 * 2408 * @return {@code true} if eviction occurred 2409 */ 2410 @GuardedBy("Segment.this") evictEntries()2411 boolean evictEntries() { 2412 if (map.evictsBySize() && count >= maxSegmentSize) { 2413 drainRecencyQueue(); 2414 2415 ReferenceEntry<K, V> e = evictionQueue.remove(); 2416 if (!removeEntry(e, e.getHash(), RemovalCause.SIZE)) { 2417 throw new AssertionError(); 2418 } 2419 return true; 2420 } 2421 return false; 2422 } 2423 2424 /** 2425 * Returns first entry of bin for given hash. 2426 */ getFirst(int hash)2427 ReferenceEntry<K, V> getFirst(int hash) { 2428 // read this volatile field only once 2429 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2430 return table.get(hash & (table.length() - 1)); 2431 } 2432 2433 // Specialized implementations of map methods 2434 getEntry(Object key, int hash)2435 ReferenceEntry<K, V> getEntry(Object key, int hash) { 2436 if (count != 0) { // read-volatile 2437 for (ReferenceEntry<K, V> e = getFirst(hash); e != null; e = e.getNext()) { 2438 if (e.getHash() != hash) { 2439 continue; 2440 } 2441 2442 K entryKey = e.getKey(); 2443 if (entryKey == null) { 2444 tryDrainReferenceQueues(); 2445 continue; 2446 } 2447 2448 if (map.keyEquivalence.equivalent(key, entryKey)) { 2449 return e; 2450 } 2451 } 2452 } 2453 2454 return null; 2455 } 2456 getLiveEntry(Object key, int hash)2457 ReferenceEntry<K, V> getLiveEntry(Object key, int hash) { 2458 ReferenceEntry<K, V> e = getEntry(key, hash); 2459 if (e == null) { 2460 return null; 2461 } else if (map.expires() && map.isExpired(e)) { 2462 tryExpireEntries(); 2463 return null; 2464 } 2465 return e; 2466 } 2467 get(Object key, int hash)2468 V get(Object key, int hash) { 2469 try { 2470 ReferenceEntry<K, V> e = getLiveEntry(key, hash); 2471 if (e == null) { 2472 return null; 2473 } 2474 2475 V value = e.getValueReference().get(); 2476 if (value != null) { 2477 recordRead(e); 2478 } else { 2479 tryDrainReferenceQueues(); 2480 } 2481 return value; 2482 } finally { 2483 postReadCleanup(); 2484 } 2485 } 2486 containsKey(Object key, int hash)2487 boolean containsKey(Object key, int hash) { 2488 try { 2489 if (count != 0) { // read-volatile 2490 ReferenceEntry<K, V> e = getLiveEntry(key, hash); 2491 if (e == null) { 2492 return false; 2493 } 2494 return e.getValueReference().get() != null; 2495 } 2496 2497 return false; 2498 } finally { 2499 postReadCleanup(); 2500 } 2501 } 2502 2503 /** 2504 * This method is a convenience for testing. Code should call {@link 2505 * MapMakerInternalMap#containsValue} directly. 
2506 */ 2507 @VisibleForTesting containsValue(Object value)2508 boolean containsValue(Object value) { 2509 try { 2510 if (count != 0) { // read-volatile 2511 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2512 int length = table.length(); 2513 for (int i = 0; i < length; ++i) { 2514 for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) { 2515 V entryValue = getLiveValue(e); 2516 if (entryValue == null) { 2517 continue; 2518 } 2519 if (map.valueEquivalence.equivalent(value, entryValue)) { 2520 return true; 2521 } 2522 } 2523 } 2524 } 2525 2526 return false; 2527 } finally { 2528 postReadCleanup(); 2529 } 2530 } 2531 put(K key, int hash, V value, boolean onlyIfAbsent)2532 V put(K key, int hash, V value, boolean onlyIfAbsent) { 2533 lock(); 2534 try { 2535 preWriteCleanup(); 2536 2537 int newCount = this.count + 1; 2538 if (newCount > this.threshold) { // ensure capacity 2539 expand(); 2540 newCount = this.count + 1; 2541 } 2542 2543 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2544 int index = hash & (table.length() - 1); 2545 ReferenceEntry<K, V> first = table.get(index); 2546 2547 // Look for an existing entry. 2548 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2549 K entryKey = e.getKey(); 2550 if (e.getHash() == hash && entryKey != null 2551 && map.keyEquivalence.equivalent(key, entryKey)) { 2552 // We found an existing entry. 2553 2554 ValueReference<K, V> valueReference = e.getValueReference(); 2555 V entryValue = valueReference.get(); 2556 2557 if (entryValue == null) { 2558 ++modCount; 2559 setValue(e, value); 2560 if (!valueReference.isComputingReference()) { 2561 enqueueNotification(key, hash, entryValue, RemovalCause.COLLECTED); 2562 newCount = this.count; // count remains unchanged 2563 } else if (evictEntries()) { // evictEntries after setting new value 2564 newCount = this.count + 1; 2565 } 2566 this.count = newCount; // write-volatile 2567 return null; 2568 } else if (onlyIfAbsent) { 2569 // Mimic 2570 // "if (!map.containsKey(key)) ... 2571 // else return map.get(key); 2572 recordLockedRead(e); 2573 return entryValue; 2574 } else { 2575 // clobber existing entry, count remains unchanged 2576 ++modCount; 2577 enqueueNotification(key, hash, entryValue, RemovalCause.REPLACED); 2578 setValue(e, value); 2579 return entryValue; 2580 } 2581 } 2582 } 2583 2584 // Create a new entry. 2585 ++modCount; 2586 ReferenceEntry<K, V> newEntry = newEntry(key, hash, first); 2587 setValue(newEntry, value); 2588 table.set(index, newEntry); 2589 if (evictEntries()) { // evictEntries after setting new value 2590 newCount = this.count + 1; 2591 } 2592 this.count = newCount; // write-volatile 2593 return null; 2594 } finally { 2595 unlock(); 2596 postWriteCleanup(); 2597 } 2598 } 2599 2600 /** 2601 * Expands the table if possible. 2602 */ 2603 @GuardedBy("Segment.this") expand()2604 void expand() { 2605 AtomicReferenceArray<ReferenceEntry<K, V>> oldTable = table; 2606 int oldCapacity = oldTable.length(); 2607 if (oldCapacity >= MAXIMUM_CAPACITY) { 2608 return; 2609 } 2610 2611 /* 2612 * Reclassify nodes in each list to new Map. Because we are using power-of-two expansion, the 2613 * elements from each bin must either stay at same index, or move with a power of two offset. 2614 * We eliminate unnecessary node creation by catching cases where old nodes can be reused 2615 * because their next fields won't change. Statistically, at the default threshold, only 2616 * about one-sixth of them need cloning when a table doubles. 
The nodes they replace will be 2617 * garbage collectable as soon as they are no longer referenced by any reader thread that may 2618 * be in the midst of traversing table right now. 2619 */ 2620 2621 int newCount = count; 2622 AtomicReferenceArray<ReferenceEntry<K, V>> newTable = newEntryArray(oldCapacity << 1); 2623 threshold = newTable.length() * 3 / 4; 2624 int newMask = newTable.length() - 1; 2625 for (int oldIndex = 0; oldIndex < oldCapacity; ++oldIndex) { 2626 // We need to guarantee that any existing reads of old Map can 2627 // proceed. So we cannot yet null out each bin. 2628 ReferenceEntry<K, V> head = oldTable.get(oldIndex); 2629 2630 if (head != null) { 2631 ReferenceEntry<K, V> next = head.getNext(); 2632 int headIndex = head.getHash() & newMask; 2633 2634 // Single node on list 2635 if (next == null) { 2636 newTable.set(headIndex, head); 2637 } else { 2638 // Reuse the consecutive sequence of nodes with the same target 2639 // index from the end of the list. tail points to the first 2640 // entry in the reusable list. 2641 ReferenceEntry<K, V> tail = head; 2642 int tailIndex = headIndex; 2643 for (ReferenceEntry<K, V> e = next; e != null; e = e.getNext()) { 2644 int newIndex = e.getHash() & newMask; 2645 if (newIndex != tailIndex) { 2646 // The index changed. We'll need to copy the previous entry. 2647 tailIndex = newIndex; 2648 tail = e; 2649 } 2650 } 2651 newTable.set(tailIndex, tail); 2652 2653 // Clone nodes leading up to the tail. 2654 for (ReferenceEntry<K, V> e = head; e != tail; e = e.getNext()) { 2655 if (isCollected(e)) { 2656 removeCollectedEntry(e); 2657 newCount--; 2658 } else { 2659 int newIndex = e.getHash() & newMask; 2660 ReferenceEntry<K, V> newNext = newTable.get(newIndex); 2661 ReferenceEntry<K, V> newFirst = copyEntry(e, newNext); 2662 newTable.set(newIndex, newFirst); 2663 } 2664 } 2665 } 2666 } 2667 } 2668 table = newTable; 2669 this.count = newCount; 2670 } 2671 replace(K key, int hash, V oldValue, V newValue)2672 boolean replace(K key, int hash, V oldValue, V newValue) { 2673 lock(); 2674 try { 2675 preWriteCleanup(); 2676 2677 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2678 int index = hash & (table.length() - 1); 2679 ReferenceEntry<K, V> first = table.get(index); 2680 2681 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2682 K entryKey = e.getKey(); 2683 if (e.getHash() == hash && entryKey != null 2684 && map.keyEquivalence.equivalent(key, entryKey)) { 2685 // If the value disappeared, this entry is partially collected, 2686 // and we should pretend like it doesn't exist. 2687 ValueReference<K, V> valueReference = e.getValueReference(); 2688 V entryValue = valueReference.get(); 2689 if (entryValue == null) { 2690 if (isCollected(valueReference)) { 2691 int newCount = this.count - 1; 2692 ++modCount; 2693 enqueueNotification(entryKey, hash, entryValue, RemovalCause.COLLECTED); 2694 ReferenceEntry<K, V> newFirst = removeFromChain(first, e); 2695 newCount = this.count - 1; 2696 table.set(index, newFirst); 2697 this.count = newCount; // write-volatile 2698 } 2699 return false; 2700 } 2701 2702 if (map.valueEquivalence.equivalent(oldValue, entryValue)) { 2703 ++modCount; 2704 enqueueNotification(key, hash, entryValue, RemovalCause.REPLACED); 2705 setValue(e, newValue); 2706 return true; 2707 } else { 2708 // Mimic 2709 // "if (map.containsKey(key) && map.get(key).equals(oldValue))..." 
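            // The failed replace still observed the entry's current value, so it behaves like a
            // read; recency is recorded with the locked variant because the segment lock is
            // already held here.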
2710 recordLockedRead(e); 2711 return false; 2712 } 2713 } 2714 } 2715 2716 return false; 2717 } finally { 2718 unlock(); 2719 postWriteCleanup(); 2720 } 2721 } 2722 replace(K key, int hash, V newValue)2723 V replace(K key, int hash, V newValue) { 2724 lock(); 2725 try { 2726 preWriteCleanup(); 2727 2728 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2729 int index = hash & (table.length() - 1); 2730 ReferenceEntry<K, V> first = table.get(index); 2731 2732 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2733 K entryKey = e.getKey(); 2734 if (e.getHash() == hash && entryKey != null 2735 && map.keyEquivalence.equivalent(key, entryKey)) { 2736 // If the value disappeared, this entry is partially collected, 2737 // and we should pretend like it doesn't exist. 2738 ValueReference<K, V> valueReference = e.getValueReference(); 2739 V entryValue = valueReference.get(); 2740 if (entryValue == null) { 2741 if (isCollected(valueReference)) { 2742 int newCount = this.count - 1; 2743 ++modCount; 2744 enqueueNotification(entryKey, hash, entryValue, RemovalCause.COLLECTED); 2745 ReferenceEntry<K, V> newFirst = removeFromChain(first, e); 2746 newCount = this.count - 1; 2747 table.set(index, newFirst); 2748 this.count = newCount; // write-volatile 2749 } 2750 return null; 2751 } 2752 2753 ++modCount; 2754 enqueueNotification(key, hash, entryValue, RemovalCause.REPLACED); 2755 setValue(e, newValue); 2756 return entryValue; 2757 } 2758 } 2759 2760 return null; 2761 } finally { 2762 unlock(); 2763 postWriteCleanup(); 2764 } 2765 } 2766 remove(Object key, int hash)2767 V remove(Object key, int hash) { 2768 lock(); 2769 try { 2770 preWriteCleanup(); 2771 2772 int newCount = this.count - 1; 2773 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2774 int index = hash & (table.length() - 1); 2775 ReferenceEntry<K, V> first = table.get(index); 2776 2777 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2778 K entryKey = e.getKey(); 2779 if (e.getHash() == hash && entryKey != null 2780 && map.keyEquivalence.equivalent(key, entryKey)) { 2781 ValueReference<K, V> valueReference = e.getValueReference(); 2782 V entryValue = valueReference.get(); 2783 2784 RemovalCause cause; 2785 if (entryValue != null) { 2786 cause = RemovalCause.EXPLICIT; 2787 } else if (isCollected(valueReference)) { 2788 cause = RemovalCause.COLLECTED; 2789 } else { 2790 return null; 2791 } 2792 2793 ++modCount; 2794 enqueueNotification(entryKey, hash, entryValue, cause); 2795 ReferenceEntry<K, V> newFirst = removeFromChain(first, e); 2796 newCount = this.count - 1; 2797 table.set(index, newFirst); 2798 this.count = newCount; // write-volatile 2799 return entryValue; 2800 } 2801 } 2802 2803 return null; 2804 } finally { 2805 unlock(); 2806 postWriteCleanup(); 2807 } 2808 } 2809 remove(Object key, int hash, Object value)2810 boolean remove(Object key, int hash, Object value) { 2811 lock(); 2812 try { 2813 preWriteCleanup(); 2814 2815 int newCount = this.count - 1; 2816 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2817 int index = hash & (table.length() - 1); 2818 ReferenceEntry<K, V> first = table.get(index); 2819 2820 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2821 K entryKey = e.getKey(); 2822 if (e.getHash() == hash && entryKey != null 2823 && map.keyEquivalence.equivalent(key, entryKey)) { 2824 ValueReference<K, V> valueReference = e.getValueReference(); 2825 V entryValue = valueReference.get(); 2826 2827 RemovalCause cause; 2828 if 
(map.valueEquivalence.equivalent(value, entryValue)) { 2829 cause = RemovalCause.EXPLICIT; 2830 } else if (isCollected(valueReference)) { 2831 cause = RemovalCause.COLLECTED; 2832 } else { 2833 return false; 2834 } 2835 2836 ++modCount; 2837 enqueueNotification(entryKey, hash, entryValue, cause); 2838 ReferenceEntry<K, V> newFirst = removeFromChain(first, e); 2839 newCount = this.count - 1; 2840 table.set(index, newFirst); 2841 this.count = newCount; // write-volatile 2842 return (cause == RemovalCause.EXPLICIT); 2843 } 2844 } 2845 2846 return false; 2847 } finally { 2848 unlock(); 2849 postWriteCleanup(); 2850 } 2851 } 2852 clear()2853 void clear() { 2854 if (count != 0) { 2855 lock(); 2856 try { 2857 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2858 if (map.removalNotificationQueue != DISCARDING_QUEUE) { 2859 for (int i = 0; i < table.length(); ++i) { 2860 for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) { 2861 // Computing references aren't actually in the map yet. 2862 if (!e.getValueReference().isComputingReference()) { 2863 enqueueNotification(e, RemovalCause.EXPLICIT); 2864 } 2865 } 2866 } 2867 } 2868 for (int i = 0; i < table.length(); ++i) { 2869 table.set(i, null); 2870 } 2871 clearReferenceQueues(); 2872 evictionQueue.clear(); 2873 expirationQueue.clear(); 2874 readCount.set(0); 2875 2876 ++modCount; 2877 count = 0; // write-volatile 2878 } finally { 2879 unlock(); 2880 postWriteCleanup(); 2881 } 2882 } 2883 } 2884 2885 /** 2886 * Removes an entry from within a table. All entries following the removed node can stay, but 2887 * all preceding ones need to be cloned. 2888 * 2889 * <p>This method does not decrement count for the removed entry, but does decrement count for 2890 * all partially collected entries which are skipped. As such callers which are modifying count 2891 * must re-read it after calling removeFromChain. 2892 * 2893 * @param first the first entry of the table 2894 * @param entry the entry being removed from the table 2895 * @return the new first entry for the table 2896 */ 2897 @GuardedBy("Segment.this") removeFromChain(ReferenceEntry<K, V> first, ReferenceEntry<K, V> entry)2898 ReferenceEntry<K, V> removeFromChain(ReferenceEntry<K, V> first, ReferenceEntry<K, V> entry) { 2899 evictionQueue.remove(entry); 2900 expirationQueue.remove(entry); 2901 2902 int newCount = count; 2903 ReferenceEntry<K, V> newFirst = entry.getNext(); 2904 for (ReferenceEntry<K, V> e = first; e != entry; e = e.getNext()) { 2905 if (isCollected(e)) { 2906 removeCollectedEntry(e); 2907 newCount--; 2908 } else { 2909 newFirst = copyEntry(e, newFirst); 2910 } 2911 } 2912 this.count = newCount; 2913 return newFirst; 2914 } 2915 removeCollectedEntry(ReferenceEntry<K, V> entry)2916 void removeCollectedEntry(ReferenceEntry<K, V> entry) { 2917 enqueueNotification(entry, RemovalCause.COLLECTED); 2918 evictionQueue.remove(entry); 2919 expirationQueue.remove(entry); 2920 } 2921 2922 /** 2923 * Removes an entry whose key has been garbage collected. 
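     *
     * <p>This is reached from {@code drainKeyReferenceQueue}, which polls the segment's key
     * {@code ReferenceQueue} and routes each enqueued entry back here through
     * {@code map.reclaimKey(entry)}.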
2924 */ reclaimKey(ReferenceEntry<K, V> entry, int hash)2925 boolean reclaimKey(ReferenceEntry<K, V> entry, int hash) { 2926 lock(); 2927 try { 2928 int newCount = count - 1; 2929 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2930 int index = hash & (table.length() - 1); 2931 ReferenceEntry<K, V> first = table.get(index); 2932 2933 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2934 if (e == entry) { 2935 ++modCount; 2936 enqueueNotification( 2937 e.getKey(), hash, e.getValueReference().get(), RemovalCause.COLLECTED); 2938 ReferenceEntry<K, V> newFirst = removeFromChain(first, e); 2939 newCount = this.count - 1; 2940 table.set(index, newFirst); 2941 this.count = newCount; // write-volatile 2942 return true; 2943 } 2944 } 2945 2946 return false; 2947 } finally { 2948 unlock(); 2949 postWriteCleanup(); 2950 } 2951 } 2952 2953 /** 2954 * Removes an entry whose value has been garbage collected. 2955 */ reclaimValue(K key, int hash, ValueReference<K, V> valueReference)2956 boolean reclaimValue(K key, int hash, ValueReference<K, V> valueReference) { 2957 lock(); 2958 try { 2959 int newCount = this.count - 1; 2960 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2961 int index = hash & (table.length() - 1); 2962 ReferenceEntry<K, V> first = table.get(index); 2963 2964 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2965 K entryKey = e.getKey(); 2966 if (e.getHash() == hash && entryKey != null 2967 && map.keyEquivalence.equivalent(key, entryKey)) { 2968 ValueReference<K, V> v = e.getValueReference(); 2969 if (v == valueReference) { 2970 ++modCount; 2971 enqueueNotification(key, hash, valueReference.get(), RemovalCause.COLLECTED); 2972 ReferenceEntry<K, V> newFirst = removeFromChain(first, e); 2973 newCount = this.count - 1; 2974 table.set(index, newFirst); 2975 this.count = newCount; // write-volatile 2976 return true; 2977 } 2978 return false; 2979 } 2980 } 2981 2982 return false; 2983 } finally { 2984 unlock(); 2985 if (!isHeldByCurrentThread()) { // don't cleanup inside of put 2986 postWriteCleanup(); 2987 } 2988 } 2989 } 2990 2991 /** 2992 * Clears a value that has not yet been set, and thus does not require count to be modified. 
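     *
     * <p>The entry is unlinked only if it still holds the exact {@code ValueReference} instance
     * passed in; if another thread has already replaced that reference, the method returns
     * {@code false} and leaves the chain untouched.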
2993 */ clearValue(K key, int hash, ValueReference<K, V> valueReference)2994 boolean clearValue(K key, int hash, ValueReference<K, V> valueReference) { 2995 lock(); 2996 try { 2997 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2998 int index = hash & (table.length() - 1); 2999 ReferenceEntry<K, V> first = table.get(index); 3000 3001 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 3002 K entryKey = e.getKey(); 3003 if (e.getHash() == hash && entryKey != null 3004 && map.keyEquivalence.equivalent(key, entryKey)) { 3005 ValueReference<K, V> v = e.getValueReference(); 3006 if (v == valueReference) { 3007 ReferenceEntry<K, V> newFirst = removeFromChain(first, e); 3008 table.set(index, newFirst); 3009 return true; 3010 } 3011 return false; 3012 } 3013 } 3014 3015 return false; 3016 } finally { 3017 unlock(); 3018 postWriteCleanup(); 3019 } 3020 } 3021 3022 @GuardedBy("Segment.this") removeEntry(ReferenceEntry<K, V> entry, int hash, RemovalCause cause)3023 boolean removeEntry(ReferenceEntry<K, V> entry, int hash, RemovalCause cause) { 3024 int newCount = this.count - 1; 3025 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 3026 int index = hash & (table.length() - 1); 3027 ReferenceEntry<K, V> first = table.get(index); 3028 3029 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 3030 if (e == entry) { 3031 ++modCount; 3032 enqueueNotification(e.getKey(), hash, e.getValueReference().get(), cause); 3033 ReferenceEntry<K, V> newFirst = removeFromChain(first, e); 3034 newCount = this.count - 1; 3035 table.set(index, newFirst); 3036 this.count = newCount; // write-volatile 3037 return true; 3038 } 3039 } 3040 3041 return false; 3042 } 3043 3044 /** 3045 * Returns {@code true} if the entry has been partially collected, meaning that either the key 3046 * is null, or the value is null and it is not computing. 3047 */ isCollected(ReferenceEntry<K, V> entry)3048 boolean isCollected(ReferenceEntry<K, V> entry) { 3049 if (entry.getKey() == null) { 3050 return true; 3051 } 3052 return isCollected(entry.getValueReference()); 3053 } 3054 3055 /** 3056 * Returns {@code true} if the value has been partially collected, meaning that the value is 3057 * null and it is not computing. 3058 */ isCollected(ValueReference<K, V> valueReference)3059 boolean isCollected(ValueReference<K, V> valueReference) { 3060 if (valueReference.isComputingReference()) { 3061 return false; 3062 } 3063 return (valueReference.get() == null); 3064 } 3065 3066 /** 3067 * Gets the value from an entry. Returns {@code null} if the entry is invalid, 3068 * partially-collected, computing, or expired. 3069 */ getLiveValue(ReferenceEntry<K, V> entry)3070 V getLiveValue(ReferenceEntry<K, V> entry) { 3071 if (entry.getKey() == null) { 3072 tryDrainReferenceQueues(); 3073 return null; 3074 } 3075 V value = entry.getValueReference().get(); 3076 if (value == null) { 3077 tryDrainReferenceQueues(); 3078 return null; 3079 } 3080 3081 if (map.expires() && map.isExpired(entry)) { 3082 tryExpireEntries(); 3083 return null; 3084 } 3085 return value; 3086 } 3087 3088 /** 3089 * Performs routine cleanup following a read. Normally cleanup happens during writes, or from 3090 * the cleanupExecutor. If cleanup is not observed after a sufficient number of reads, try 3091 * cleaning up from the read thread. 
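     *
     * <p>In effect the read counter acts as a cheap sampler:
     * <pre>{@code
     * if ((readCount.incrementAndGet() & DRAIN_THRESHOLD) == 0) {
     *   runCleanup();
     * }
     * }</pre>
     * Since the threshold is a power-of-two-minus-one mask, this fires on roughly one read in
     * {@code DRAIN_THRESHOLD + 1}, keeping the amortized cleanup cost per read small.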
3092 */ postReadCleanup()3093 void postReadCleanup() { 3094 if ((readCount.incrementAndGet() & DRAIN_THRESHOLD) == 0) { 3095 runCleanup(); 3096 } 3097 } 3098 3099 /** 3100 * Performs routine cleanup prior to executing a write. This should be called every time a 3101 * write thread acquires the segment lock, immediately after acquiring the lock. 3102 * 3103 * <p>Post-condition: expireEntries has been run. 3104 */ 3105 @GuardedBy("Segment.this") preWriteCleanup()3106 void preWriteCleanup() { 3107 runLockedCleanup(); 3108 } 3109 3110 /** 3111 * Performs routine cleanup following a write. 3112 */ postWriteCleanup()3113 void postWriteCleanup() { 3114 runUnlockedCleanup(); 3115 } 3116 runCleanup()3117 void runCleanup() { 3118 runLockedCleanup(); 3119 runUnlockedCleanup(); 3120 } 3121 runLockedCleanup()3122 void runLockedCleanup() { 3123 if (tryLock()) { 3124 try { 3125 drainReferenceQueues(); 3126 expireEntries(); // calls drainRecencyQueue 3127 readCount.set(0); 3128 } finally { 3129 unlock(); 3130 } 3131 } 3132 } 3133 runUnlockedCleanup()3134 void runUnlockedCleanup() { 3135 // locked cleanup may generate notifications we can send unlocked 3136 if (!isHeldByCurrentThread()) { 3137 map.processPendingNotifications(); 3138 } 3139 } 3140 3141 } 3142 3143 // Queues 3144 3145 /** 3146 * A custom queue for managing eviction order. Note that this is tightly integrated with {@code 3147 * ReferenceEntry}, upon which it relies to perform its linking. 3148 * 3149 * <p>Note that this entire implementation makes the assumption that all elements which are in 3150 * the map are also in this queue, and that all elements not in the queue are not in the map. 3151 * 3152 * <p>The benefits of creating our own queue are that (1) we can replace elements in the middle 3153 * of the queue as part of copyEvictableEntry, and (2) the contains method is highly optimized 3154 * for the current model. 3155 */ 3156 static final class EvictionQueue<K, V> extends AbstractQueue<ReferenceEntry<K, V>> { 3157 final ReferenceEntry<K, V> head = new AbstractReferenceEntry<K, V>() { 3158 3159 ReferenceEntry<K, V> nextEvictable = this; 3160 3161 @Override 3162 public ReferenceEntry<K, V> getNextEvictable() { 3163 return nextEvictable; 3164 } 3165 3166 @Override 3167 public void setNextEvictable(ReferenceEntry<K, V> next) { 3168 this.nextEvictable = next; 3169 } 3170 3171 ReferenceEntry<K, V> previousEvictable = this; 3172 3173 @Override 3174 public ReferenceEntry<K, V> getPreviousEvictable() { 3175 return previousEvictable; 3176 } 3177 3178 @Override 3179 public void setPreviousEvictable(ReferenceEntry<K, V> previous) { 3180 this.previousEvictable = previous; 3181 } 3182 }; 3183 3184 // implements Queue 3185 3186 @Override offer(ReferenceEntry<K, V> entry)3187 public boolean offer(ReferenceEntry<K, V> entry) { 3188 // unlink 3189 connectEvictables(entry.getPreviousEvictable(), entry.getNextEvictable()); 3190 3191 // add to tail 3192 connectEvictables(head.getPreviousEvictable(), entry); 3193 connectEvictables(entry, head); 3194 3195 return true; 3196 } 3197 3198 @Override peek()3199 public ReferenceEntry<K, V> peek() { 3200 ReferenceEntry<K, V> next = head.getNextEvictable(); 3201 return (next == head) ? 
          null : next;
    }

    @Override
    public ReferenceEntry<K, V> poll() {
      ReferenceEntry<K, V> next = head.getNextEvictable();
      if (next == head) {
        return null;
      }

      remove(next);
      return next;
    }

    @Override
    @SuppressWarnings("unchecked")
    public boolean remove(Object o) {
      ReferenceEntry<K, V> e = (ReferenceEntry) o;
      ReferenceEntry<K, V> previous = e.getPreviousEvictable();
      ReferenceEntry<K, V> next = e.getNextEvictable();
      connectEvictables(previous, next);
      nullifyEvictable(e);

      return next != NullEntry.INSTANCE;
    }

    @Override
    @SuppressWarnings("unchecked")
    public boolean contains(Object o) {
      ReferenceEntry<K, V> e = (ReferenceEntry) o;
      return e.getNextEvictable() != NullEntry.INSTANCE;
    }

    @Override
    public boolean isEmpty() {
      return head.getNextEvictable() == head;
    }

    @Override
    public int size() {
      int size = 0;
      for (ReferenceEntry<K, V> e = head.getNextEvictable(); e != head; e = e.getNextEvictable()) {
        size++;
      }
      return size;
    }

    @Override
    public void clear() {
      ReferenceEntry<K, V> e = head.getNextEvictable();
      while (e != head) {
        ReferenceEntry<K, V> next = e.getNextEvictable();
        nullifyEvictable(e);
        e = next;
      }

      head.setNextEvictable(head);
      head.setPreviousEvictable(head);
    }

    @Override
    public Iterator<ReferenceEntry<K, V>> iterator() {
      return new AbstractLinkedIterator<ReferenceEntry<K, V>>(peek()) {
        @Override
        protected ReferenceEntry<K, V> computeNext(ReferenceEntry<K, V> previous) {
          ReferenceEntry<K, V> next = previous.getNextEvictable();
          return (next == head) ? null : next;
        }
      };
    }
  }

  /**
   * A custom queue for managing expiration order. Note that this is tightly integrated with
   * {@code ReferenceEntry}, upon which it relies to perform its linking.
   *
   * <p>Note that this entire implementation makes the assumption that all elements which are in
   * the map are also in this queue, and that all elements not in the queue are not in the map.
   *
   * <p>The benefits of creating our own queue are that (1) we can replace elements in the middle
   * of the queue as part of copyEvictableEntry, and (2) the contains method is highly optimized
   * for the current model.
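   *
   * <p>Since MapMaker currently keeps expireAfterWrite and expireAfterAccess mutually exclusive,
   * every entry in a segment shares a single expiration duration; entries are re-offered at the
   * tail whenever their expiration time is recorded, so expiration times never decrease from head
   * to tail and {@code expireEntries()} can stop at the first unexpired entry it peeks at.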
3283 */ 3284 static final class ExpirationQueue<K, V> extends AbstractQueue<ReferenceEntry<K, V>> { 3285 final ReferenceEntry<K, V> head = new AbstractReferenceEntry<K, V>() { 3286 3287 @Override 3288 public long getExpirationTime() { 3289 return Long.MAX_VALUE; 3290 } 3291 3292 @Override 3293 public void setExpirationTime(long time) {} 3294 3295 ReferenceEntry<K, V> nextExpirable = this; 3296 3297 @Override 3298 public ReferenceEntry<K, V> getNextExpirable() { 3299 return nextExpirable; 3300 } 3301 3302 @Override 3303 public void setNextExpirable(ReferenceEntry<K, V> next) { 3304 this.nextExpirable = next; 3305 } 3306 3307 ReferenceEntry<K, V> previousExpirable = this; 3308 3309 @Override 3310 public ReferenceEntry<K, V> getPreviousExpirable() { 3311 return previousExpirable; 3312 } 3313 3314 @Override 3315 public void setPreviousExpirable(ReferenceEntry<K, V> previous) { 3316 this.previousExpirable = previous; 3317 } 3318 }; 3319 3320 // implements Queue 3321 3322 @Override 3323 public boolean offer(ReferenceEntry<K, V> entry) { 3324 // unlink 3325 connectExpirables(entry.getPreviousExpirable(), entry.getNextExpirable()); 3326 3327 // add to tail 3328 connectExpirables(head.getPreviousExpirable(), entry); 3329 connectExpirables(entry, head); 3330 3331 return true; 3332 } 3333 3334 @Override 3335 public ReferenceEntry<K, V> peek() { 3336 ReferenceEntry<K, V> next = head.getNextExpirable(); 3337 return (next == head) ? null : next; 3338 } 3339 3340 @Override 3341 public ReferenceEntry<K, V> poll() { 3342 ReferenceEntry<K, V> next = head.getNextExpirable(); 3343 if (next == head) { 3344 return null; 3345 } 3346 3347 remove(next); 3348 return next; 3349 } 3350 3351 @Override 3352 @SuppressWarnings("unchecked") 3353 public boolean remove(Object o) { 3354 ReferenceEntry<K, V> e = (ReferenceEntry) o; 3355 ReferenceEntry<K, V> previous = e.getPreviousExpirable(); 3356 ReferenceEntry<K, V> next = e.getNextExpirable(); 3357 connectExpirables(previous, next); 3358 nullifyExpirable(e); 3359 3360 return next != NullEntry.INSTANCE; 3361 } 3362 3363 @Override 3364 @SuppressWarnings("unchecked") 3365 public boolean contains(Object o) { 3366 ReferenceEntry<K, V> e = (ReferenceEntry) o; 3367 return e.getNextExpirable() != NullEntry.INSTANCE; 3368 } 3369 3370 @Override 3371 public boolean isEmpty() { 3372 return head.getNextExpirable() == head; 3373 } 3374 3375 @Override 3376 public int size() { 3377 int size = 0; 3378 for (ReferenceEntry<K, V> e = head.getNextExpirable(); e != head; e = e.getNextExpirable()) { 3379 size++; 3380 } 3381 return size; 3382 } 3383 3384 @Override 3385 public void clear() { 3386 ReferenceEntry<K, V> e = head.getNextExpirable(); 3387 while (e != head) { 3388 ReferenceEntry<K, V> next = e.getNextExpirable(); 3389 nullifyExpirable(e); 3390 e = next; 3391 } 3392 3393 head.setNextExpirable(head); 3394 head.setPreviousExpirable(head); 3395 } 3396 3397 @Override 3398 public Iterator<ReferenceEntry<K, V>> iterator() { 3399 return new AbstractLinkedIterator<ReferenceEntry<K, V>>(peek()) { 3400 @Override 3401 protected ReferenceEntry<K, V> computeNext(ReferenceEntry<K, V> previous) { 3402 ReferenceEntry<K, V> next = previous.getNextExpirable(); 3403 return (next == head) ? 
null : next; 3404 } 3405 }; 3406 } 3407 } 3408 3409 static final class CleanupMapTask implements Runnable { 3410 final WeakReference<MapMakerInternalMap<?, ?>> mapReference; 3411 3412 public CleanupMapTask(MapMakerInternalMap<?, ?> map) { 3413 this.mapReference = new WeakReference<MapMakerInternalMap<?, ?>>(map); 3414 } 3415 3416 @Override 3417 public void run() { 3418 MapMakerInternalMap<?, ?> map = mapReference.get(); 3419 if (map == null) { 3420 throw new CancellationException(); 3421 } 3422 3423 for (Segment<?, ?> segment : map.segments) { 3424 segment.runCleanup(); 3425 } 3426 } 3427 } 3428 3429 // ConcurrentMap methods 3430 3431 @Override 3432 public boolean isEmpty() { 3433 /* 3434 * Sum per-segment modCounts to avoid mis-reporting when elements are concurrently added and 3435 * removed in one segment while checking another, in which case the table was never actually 3436 * empty at any point. (The sum ensures accuracy up through at least 1<<31 per-segment 3437 * modifications before recheck.) Method containsValue() uses similar constructions for 3438 * stability checks. 3439 */ 3440 long sum = 0L; 3441 Segment<K, V>[] segments = this.segments; 3442 for (int i = 0; i < segments.length; ++i) { 3443 if (segments[i].count != 0) { 3444 return false; 3445 } 3446 sum += segments[i].modCount; 3447 } 3448 3449 if (sum != 0L) { // recheck unless no modifications 3450 for (int i = 0; i < segments.length; ++i) { 3451 if (segments[i].count != 0) { 3452 return false; 3453 } 3454 sum -= segments[i].modCount; 3455 } 3456 if (sum != 0L) { 3457 return false; 3458 } 3459 } 3460 return true; 3461 } 3462 3463 @Override 3464 public int size() { 3465 Segment<K, V>[] segments = this.segments; 3466 long sum = 0; 3467 for (int i = 0; i < segments.length; ++i) { 3468 sum += segments[i].count; 3469 } 3470 return Ints.saturatedCast(sum); 3471 } 3472 3473 @Override 3474 public V get(@Nullable Object key) { 3475 if (key == null) { 3476 return null; 3477 } 3478 int hash = hash(key); 3479 return segmentFor(hash).get(key, hash); 3480 } 3481 3482 /** 3483 * Returns the internal entry for the specified key. The entry may be computing, expired, or 3484 * partially collected. Does not impact recency ordering. 3485 */ 3486 ReferenceEntry<K, V> getEntry(@Nullable Object key) { 3487 if (key == null) { 3488 return null; 3489 } 3490 int hash = hash(key); 3491 return segmentFor(hash).getEntry(key, hash); 3492 } 3493 3494 /** 3495 * Returns the live internal entry for the specified key. Does not impact recency ordering. 3496 */ 3497 ReferenceEntry<K, V> getLiveEntry(@Nullable Object key) { 3498 if (key == null) { 3499 return null; 3500 } 3501 int hash = hash(key); 3502 return segmentFor(hash).getLiveEntry(key, hash); 3503 } 3504 3505 @Override 3506 public boolean containsKey(@Nullable Object key) { 3507 if (key == null) { 3508 return false; 3509 } 3510 int hash = hash(key); 3511 return segmentFor(hash).containsKey(key, hash); 3512 } 3513 3514 @Override 3515 public boolean containsValue(@Nullable Object value) { 3516 if (value == null) { 3517 return false; 3518 } 3519 3520 // This implementation is patterned after ConcurrentHashMap, but without the locking. The only 3521 // way for it to return a false negative would be for the target value to jump around in the map 3522 // such that none of the subsequent iterations observed it, despite the fact that at every point 3523 // in time it was present somewhere in the map.
This becomes increasingly unlikely as 3524 // CONTAINS_VALUE_RETRIES increases, though without locking it is theoretically possible. 3525 final Segment<K,V>[] segments = this.segments; 3526 long last = -1L; 3527 for (int i = 0; i < CONTAINS_VALUE_RETRIES; i++) { 3528 long sum = 0L; 3529 for (Segment<K, V> segment : segments) { 3530 // ensure visibility of most recent completed write 3531 @SuppressWarnings({"UnusedDeclaration", "unused"}) 3532 int c = segment.count; // read-volatile 3533 3534 AtomicReferenceArray<ReferenceEntry<K, V>> table = segment.table; 3535 for (int j = 0 ; j < table.length(); j++) { 3536 for (ReferenceEntry<K, V> e = table.get(j); e != null; e = e.getNext()) { 3537 V v = segment.getLiveValue(e); 3538 if (v != null && valueEquivalence.equivalent(value, v)) { 3539 return true; 3540 } 3541 } 3542 } 3543 sum += segment.modCount; 3544 } 3545 if (sum == last) { 3546 break; 3547 } 3548 last = sum; 3549 } 3550 return false; 3551 } 3552 3553 @Override 3554 public V put(K key, V value) { 3555 checkNotNull(key); 3556 checkNotNull(value); 3557 int hash = hash(key); 3558 return segmentFor(hash).put(key, hash, value, false); 3559 } 3560 3561 @Override 3562 public V putIfAbsent(K key, V value) { 3563 checkNotNull(key); 3564 checkNotNull(value); 3565 int hash = hash(key); 3566 return segmentFor(hash).put(key, hash, value, true); 3567 } 3568 3569 @Override 3570 public void putAll(Map<? extends K, ? extends V> m) { 3571 for (Entry<? extends K, ? extends V> e : m.entrySet()) { 3572 put(e.getKey(), e.getValue()); 3573 } 3574 } 3575 3576 @Override 3577 public V remove(@Nullable Object key) { 3578 if (key == null) { 3579 return null; 3580 } 3581 int hash = hash(key); 3582 return segmentFor(hash).remove(key, hash); 3583 } 3584 3585 @Override 3586 public boolean remove(@Nullable Object key, @Nullable Object value) { 3587 if (key == null || value == null) { 3588 return false; 3589 } 3590 int hash = hash(key); 3591 return segmentFor(hash).remove(key, hash, value); 3592 } 3593 3594 @Override 3595 public boolean replace(K key, @Nullable V oldValue, V newValue) { 3596 checkNotNull(key); 3597 checkNotNull(newValue); 3598 if (oldValue == null) { 3599 return false; 3600 } 3601 int hash = hash(key); 3602 return segmentFor(hash).replace(key, hash, oldValue, newValue); 3603 } 3604 3605 @Override 3606 public V replace(K key, V value) { 3607 checkNotNull(key); 3608 checkNotNull(value); 3609 int hash = hash(key); 3610 return segmentFor(hash).replace(key, hash, value); 3611 } 3612 3613 @Override 3614 public void clear() { 3615 for (Segment<K, V> segment : segments) { 3616 segment.clear(); 3617 } 3618 } 3619 3620 Set<K> keySet; 3621 3622 @Override 3623 public Set<K> keySet() { 3624 Set<K> ks = keySet; 3625 return (ks != null) ? ks : (keySet = new KeySet()); 3626 } 3627 3628 Collection<V> values; 3629 3630 @Override 3631 public Collection<V> values() { 3632 Collection<V> vs = values; 3633 return (vs != null) ? vs : (values = new Values()); 3634 } 3635 3636 Set<Entry<K, V>> entrySet; 3637 3638 @Override 3639 public Set<Entry<K, V>> entrySet() { 3640 Set<Entry<K, V>> es = entrySet; 3641 return (es != null) ? 
es : (entrySet = new EntrySet()); 3642 } 3643 3644 // Iterator Support 3645 3646 abstract class HashIterator { 3647 3648 int nextSegmentIndex; 3649 int nextTableIndex; 3650 Segment<K, V> currentSegment; 3651 AtomicReferenceArray<ReferenceEntry<K, V>> currentTable; 3652 ReferenceEntry<K, V> nextEntry; 3653 WriteThroughEntry nextExternal; 3654 WriteThroughEntry lastReturned; 3655 3656 HashIterator() { 3657 nextSegmentIndex = segments.length - 1; 3658 nextTableIndex = -1; 3659 advance(); 3660 } 3661 3662 final void advance() { 3663 nextExternal = null; 3664 3665 if (nextInChain()) { 3666 return; 3667 } 3668 3669 if (nextInTable()) { 3670 return; 3671 } 3672 3673 while (nextSegmentIndex >= 0) { 3674 currentSegment = segments[nextSegmentIndex--]; 3675 if (currentSegment.count != 0) { 3676 currentTable = currentSegment.table; 3677 nextTableIndex = currentTable.length() - 1; 3678 if (nextInTable()) { 3679 return; 3680 } 3681 } 3682 } 3683 } 3684 3685 /** 3686 * Finds the next entry in the current chain. Returns {@code true} if an entry was found. 3687 */ 3688 boolean nextInChain() { 3689 if (nextEntry != null) { 3690 for (nextEntry = nextEntry.getNext(); nextEntry != null; nextEntry = nextEntry.getNext()) { 3691 if (advanceTo(nextEntry)) { 3692 return true; 3693 } 3694 } 3695 } 3696 return false; 3697 } 3698 3699 /** 3700 * Finds the next entry in the current table. Returns {@code true} if an entry was found. 3701 */ 3702 boolean nextInTable() { 3703 while (nextTableIndex >= 0) { 3704 if ((nextEntry = currentTable.get(nextTableIndex--)) != null) { 3705 if (advanceTo(nextEntry) || nextInChain()) { 3706 return true; 3707 } 3708 } 3709 } 3710 return false; 3711 } 3712 3713 /** 3714 * Advances to the given entry. Returns {@code true} if the entry was valid, {@code false} if it 3715 * should be skipped. 3716 */ 3717 boolean advanceTo(ReferenceEntry<K, V> entry) { 3718 try { 3719 K key = entry.getKey(); 3720 V value = getLiveValue(entry); 3721 if (value != null) { 3722 nextExternal = new WriteThroughEntry(key, value); 3723 return true; 3724 } else { 3725 // Skip stale entry. 3726 return false; 3727 } 3728 } finally { 3729 currentSegment.postReadCleanup(); 3730 } 3731 } 3732 3733 public boolean hasNext() { 3734 return nextExternal != null; 3735 } 3736 3737 WriteThroughEntry nextEntry() { 3738 if (nextExternal == null) { 3739 throw new NoSuchElementException(); 3740 } 3741 lastReturned = nextExternal; 3742 advance(); 3743 return lastReturned; 3744 } 3745 3746 public void remove() { 3747 checkState(lastReturned != null); 3748 MapMakerInternalMap.this.remove(lastReturned.getKey()); 3749 lastReturned = null; 3750 } 3751 } 3752 3753 final class KeyIterator extends HashIterator implements Iterator<K> { 3754 3755 @Override 3756 public K next() { 3757 return nextEntry().getKey(); 3758 } 3759 } 3760 3761 final class ValueIterator extends HashIterator implements Iterator<V> { 3762 3763 @Override 3764 public V next() { 3765 return nextEntry().getValue(); 3766 } 3767 } 3768 3769 /** 3770 * Custom Entry class used by EntryIterator.next(), that relays setValue changes to the 3771 * underlying map. 
3772 */ 3773 final class WriteThroughEntry extends AbstractMapEntry<K, V> { 3774 final K key; // non-null 3775 V value; // non-null 3776 3777 WriteThroughEntry(K key, V value) { 3778 this.key = key; 3779 this.value = value; 3780 } 3781 3782 @Override 3783 public K getKey() { 3784 return key; 3785 } 3786 3787 @Override 3788 public V getValue() { 3789 return value; 3790 } 3791 3792 @Override 3793 public boolean equals(@Nullable Object object) { 3794 // Cannot use key and value equivalence 3795 if (object instanceof Entry) { 3796 Entry<?, ?> that = (Entry<?, ?>) object; 3797 return key.equals(that.getKey()) && value.equals(that.getValue()); 3798 } 3799 return false; 3800 } 3801 3802 @Override 3803 public int hashCode() { 3804 // Cannot use key and value equivalence 3805 return key.hashCode() ^ value.hashCode(); 3806 } 3807 3808 @Override 3809 public V setValue(V newValue) { 3810 V oldValue = put(key, newValue); 3811 value = newValue; // only if put succeeds 3812 return oldValue; 3813 } 3814 } 3815 3816 final class EntryIterator extends HashIterator implements Iterator<Entry<K, V>> { 3817 3818 @Override 3819 public Entry<K, V> next() { 3820 return nextEntry(); 3821 } 3822 } 3823 3824 final class KeySet extends AbstractSet<K> { 3825 3826 @Override 3827 public Iterator<K> iterator() { 3828 return new KeyIterator(); 3829 } 3830 3831 @Override 3832 public int size() { 3833 return MapMakerInternalMap.this.size(); 3834 } 3835 3836 @Override 3837 public boolean isEmpty() { 3838 return MapMakerInternalMap.this.isEmpty(); 3839 } 3840 3841 @Override 3842 public boolean contains(Object o) { 3843 return MapMakerInternalMap.this.containsKey(o); 3844 } 3845 3846 @Override 3847 public boolean remove(Object o) { 3848 return MapMakerInternalMap.this.remove(o) != null; 3849 } 3850 3851 @Override 3852 public void clear() { 3853 MapMakerInternalMap.this.clear(); 3854 } 3855 } 3856 3857 final class Values extends AbstractCollection<V> { 3858 3859 @Override 3860 public Iterator<V> iterator() { 3861 return new ValueIterator(); 3862 } 3863 3864 @Override 3865 public int size() { 3866 return MapMakerInternalMap.this.size(); 3867 } 3868 3869 @Override 3870 public boolean isEmpty() { 3871 return MapMakerInternalMap.this.isEmpty(); 3872 } 3873 3874 @Override 3875 public boolean contains(Object o) { 3876 return MapMakerInternalMap.this.containsValue(o); 3877 } 3878 3879 @Override 3880 public void clear() { 3881 MapMakerInternalMap.this.clear(); 3882 } 3883 } 3884 3885 final class EntrySet extends AbstractSet<Entry<K, V>> { 3886 3887 @Override 3888 public Iterator<Entry<K, V>> iterator() { 3889 return new EntryIterator(); 3890 } 3891 3892 @Override 3893 public boolean contains(Object o) { 3894 if (!(o instanceof Entry)) { 3895 return false; 3896 } 3897 Entry<?, ?> e = (Entry<?, ?>) o; 3898 Object key = e.getKey(); 3899 if (key == null) { 3900 return false; 3901 } 3902 V v = MapMakerInternalMap.this.get(key); 3903 3904 return v != null && valueEquivalence.equivalent(e.getValue(), v); 3905 } 3906 3907 @Override 3908 public boolean remove(Object o) { 3909 if (!(o instanceof Entry)) { 3910 return false; 3911 } 3912 Entry<?, ?> e = (Entry<?, ?>) o; 3913 Object key = e.getKey(); 3914 return key != null && MapMakerInternalMap.this.remove(key, e.getValue()); 3915 } 3916 3917 @Override 3918 public int size() { 3919 return MapMakerInternalMap.this.size(); 3920 } 3921 3922 @Override 3923 public boolean isEmpty() { 3924 return MapMakerInternalMap.this.isEmpty(); 3925 } 3926 3927 @Override 3928 public void clear() { 3929 
MapMakerInternalMap.this.clear(); 3930 } 3931 } 3932 3933 // Serialization Support 3934 3935 private static final long serialVersionUID = 5; 3936 3937 Object writeReplace() { 3938 return new SerializationProxy<K, V>(keyStrength, valueStrength, keyEquivalence, 3939 valueEquivalence, expireAfterWriteNanos, expireAfterAccessNanos, maximumSize, 3940 concurrencyLevel, removalListener, this); 3941 } 3942 3943 /** 3944 * The actual object that gets serialized. Unfortunately, readResolve() doesn't get called when a 3945 * circular dependency is present, so the proxy must be able to behave as the map itself. 3946 */ 3947 abstract static class AbstractSerializationProxy<K, V> 3948 extends ForwardingConcurrentMap<K, V> implements Serializable { 3949 private static final long serialVersionUID = 3; 3950 3951 final Strength keyStrength; 3952 final Strength valueStrength; 3953 final Equivalence<Object> keyEquivalence; 3954 final Equivalence<Object> valueEquivalence; 3955 final long expireAfterWriteNanos; 3956 final long expireAfterAccessNanos; 3957 final int maximumSize; 3958 final int concurrencyLevel; 3959 final RemovalListener<? super K, ? super V> removalListener; 3960 3961 transient ConcurrentMap<K, V> delegate; 3962 3963 AbstractSerializationProxy(Strength keyStrength, Strength valueStrength, 3964 Equivalence<Object> keyEquivalence, Equivalence<Object> valueEquivalence, 3965 long expireAfterWriteNanos, long expireAfterAccessNanos, int maximumSize, 3966 int concurrencyLevel, RemovalListener<? super K, ? super V> removalListener, 3967 ConcurrentMap<K, V> delegate) { 3968 this.keyStrength = keyStrength; 3969 this.valueStrength = valueStrength; 3970 this.keyEquivalence = keyEquivalence; 3971 this.valueEquivalence = valueEquivalence; 3972 this.expireAfterWriteNanos = expireAfterWriteNanos; 3973 this.expireAfterAccessNanos = expireAfterAccessNanos; 3974 this.maximumSize = maximumSize; 3975 this.concurrencyLevel = concurrencyLevel; 3976 this.removalListener = removalListener; 3977 this.delegate = delegate; 3978 } 3979 3980 @Override 3981 protected ConcurrentMap<K, V> delegate() { 3982 return delegate; 3983 } 3984 3985 void writeMapTo(ObjectOutputStream out) throws IOException { 3986 out.writeInt(delegate.size()); 3987 for (Entry<K, V> entry : delegate.entrySet()) { 3988 out.writeObject(entry.getKey()); 3989 out.writeObject(entry.getValue()); 3990 } 3991 out.writeObject(null); // terminate entries 3992 } 3993 3994 @SuppressWarnings("deprecation") // serialization of deprecated feature 3995 MapMaker readMapMaker(ObjectInputStream in) throws IOException { 3996 int size = in.readInt(); 3997 MapMaker mapMaker = new MapMaker() 3998 .initialCapacity(size) 3999 .setKeyStrength(keyStrength) 4000 .setValueStrength(valueStrength) 4001 .keyEquivalence(keyEquivalence) 4002 .valueEquivalence(valueEquivalence) 4003 .concurrencyLevel(concurrencyLevel); 4004 mapMaker.removalListener(removalListener); 4005 if (expireAfterWriteNanos > 0) { 4006 mapMaker.expireAfterWrite(expireAfterWriteNanos, TimeUnit.NANOSECONDS); 4007 } 4008 if (expireAfterAccessNanos > 0) { 4009 mapMaker.expireAfterAccess(expireAfterAccessNanos, TimeUnit.NANOSECONDS); 4010 } 4011 if (maximumSize != MapMaker.UNSET_INT) { 4012 mapMaker.maximumSize(maximumSize); 4013 } 4014 return mapMaker; 4015 } 4016 4017 @SuppressWarnings("unchecked") 4018 void readEntries(ObjectInputStream in) throws IOException, ClassNotFoundException { 4019 while (true) { 4020 K key = (K) in.readObject(); 4021 if (key == null) { 4022 break; // terminator 4023 } 4024 V value = (V) 
in.readObject(); 4025 delegate.put(key, value); 4026 } 4027 } 4028 } 4029 4030 /** 4031 * The actual object that gets serialized. Unfortunately, readResolve() doesn't get called when a 4032 * circular dependency is present, so the proxy must be able to behave as the map itself. 4033 */ 4034 private static final class SerializationProxy<K, V> extends AbstractSerializationProxy<K, V> { 4035 private static final long serialVersionUID = 3; 4036 4037 SerializationProxy(Strength keyStrength, Strength valueStrength, 4038 Equivalence<Object> keyEquivalence, Equivalence<Object> valueEquivalence, 4039 long expireAfterWriteNanos, long expireAfterAccessNanos, int maximumSize, 4040 int concurrencyLevel, RemovalListener<? super K, ? super V> removalListener, 4041 ConcurrentMap<K, V> delegate) { 4042 super(keyStrength, valueStrength, keyEquivalence, valueEquivalence, expireAfterWriteNanos, 4043 expireAfterAccessNanos, maximumSize, concurrencyLevel, removalListener, delegate); 4044 } 4045 4046 private void writeObject(ObjectOutputStream out) throws IOException { 4047 out.defaultWriteObject(); 4048 writeMapTo(out); 4049 } 4050 4051 private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { 4052 in.defaultReadObject(); 4053 MapMaker mapMaker = readMapMaker(in); 4054 delegate = mapMaker.makeMap(); 4055 readEntries(in); 4056 } 4057 4058 private Object readResolve() { 4059 return delegate; 4060 } 4061 } 4062 } 4063
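
// Illustrative usage sketch only, not part of the original class: it shows how a map backed by
// this implementation is typically obtained through MapMaker, and how the serialization proxy
// above round-trips its contents (writeReplace() -> SerializationProxy -> readResolve()). The
// helper class name, the choice of weakValues() to obtain this custom implementation, and the
// example keys and values are assumptions made purely for illustration.
final class MapMakerInternalMapUsageSketch {
  private MapMakerInternalMapUsageSketch() {}

  static void roundTripExample() throws IOException, ClassNotFoundException {
    // Build a map that uses the custom implementation (weak values opt out of the plain
    // ConcurrentHashMap path in MapMaker).
    ConcurrentMap<String, Integer> map = new MapMaker()
        .concurrencyLevel(4)
        .weakValues()
        .makeMap();
    map.put("a", 1);
    map.putIfAbsent("a", 2); // keeps the existing mapping and returns 1
    map.replace("a", 1, 3);  // per-segment compare-and-set style replace

    // Serializing the map writes the proxy; deserializing rebuilds an equivalent map via
    // readMapMaker()/readEntries() and hands it back from readResolve().
    java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
    ObjectOutputStream out = new ObjectOutputStream(bytes);
    out.writeObject(map);
    out.close();

    ObjectInputStream in =
        new ObjectInputStream(new java.io.ByteArrayInputStream(bytes.toByteArray()));
    @SuppressWarnings("unchecked")
    ConcurrentMap<String, Integer> copy = (ConcurrentMap<String, Integer>) in.readObject();
    in.close();
    // copy now contains {"a"=3}, reconstructed by the proxy rather than by this class directly.
  }
}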