/*
 * Copyright (C) 2009 The Guava Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */

package com.google.common.cache;

import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.cache.CacheBuilder.NULL_TICKER;
import static com.google.common.cache.CacheBuilder.UNSET_INT;
import static com.google.common.util.concurrent.Futures.transform;
import static com.google.common.util.concurrent.MoreExecutors.directExecutor;
import static com.google.common.util.concurrent.Uninterruptibles.getUninterruptibly;
import static java.util.Collections.unmodifiableSet;
import static java.util.concurrent.TimeUnit.NANOSECONDS;

import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Equivalence;
import com.google.common.base.Stopwatch;
import com.google.common.base.Ticker;
import com.google.common.cache.AbstractCache.SimpleStatsCounter;
import com.google.common.cache.AbstractCache.StatsCounter;
import com.google.common.cache.CacheBuilder.NullListener;
import com.google.common.cache.CacheBuilder.OneWeigher;
import com.google.common.cache.CacheLoader.InvalidCacheLoadException;
import com.google.common.cache.CacheLoader.UnsupportedLoadingOperationException;
import com.google.common.cache.LocalCache.AbstractCacheSet;
import com.google.common.collect.AbstractSequentialIterator;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.primitives.Ints;
import com.google.common.util.concurrent.ExecutionError;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import com.google.common.util.concurrent.UncheckedExecutionException;
import com.google.common.util.concurrent.Uninterruptibles;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import com.google.errorprone.annotations.concurrent.GuardedBy;
import com.google.errorprone.annotations.concurrent.LazyInit;
import com.google.j2objc.annotations.RetainedWith;
import com.google.j2objc.annotations.Weak;
import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.ObjectInputStream;
import java.io.Serializable;
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.lang.ref.WeakReference;
import java.util.AbstractCollection;
import java.util.AbstractMap;
import java.util.AbstractQueue;
import java.util.AbstractSet;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.concurrent.locks.ReentrantLock;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.CheckForNull;

/**
 * The concurrent hash map implementation built by {@link CacheBuilder}.
 *
 * <p>This implementation is heavily derived from revision 1.96 of <a
 * href="http://tinyurl.com/ConcurrentHashMap">ConcurrentHashMap.java</a>.
 *
 * @author Charles Fry
 * @author Bob Lee ({@code com.google.common.collect.MapMaker})
 * @author Doug Lea ({@code ConcurrentHashMap})
 */
@SuppressWarnings({
  "GoodTime", // lots of violations (nanosecond math)
  "nullness", // too much trouble for the payoff
})
@GwtCompatible(emulated = true)
// TODO(cpovirk): Annotate for nullness.
class LocalCache<K, V> extends AbstractMap<K, V> implements ConcurrentMap<K, V> {

  /*
   * The basic strategy is to subdivide the table among Segments, each of which itself is a
   * concurrently readable hash table. The map supports non-blocking reads and concurrent writes
   * across different segments.
   *
   * If a maximum size is specified, a best-effort bounding is performed per segment, using a
   * page-replacement algorithm to determine which entries to evict when the capacity has been
   * exceeded.
   *
   * The page replacement algorithm's data structures are kept loosely consistent with the map. The
   * ordering of writes to a segment is sequentially consistent. An update to the map and the
   * recording of reads may not be immediately reflected in the algorithm's data structures. These
   * structures are guarded by a lock, and operations are applied in batches to avoid lock
   * contention. The penalty of applying the batches is spread across threads, so that the
   * amortized cost is only slightly higher than performing the operation without enforcing the
   * capacity constraint.
   *
   * This implementation uses a per-segment queue to record a memento of the additions, removals,
   * and accesses that were performed on the map. The queue is drained on writes and when it
   * exceeds its capacity threshold.
   *
   * The Least Recently Used page replacement algorithm was chosen due to its simplicity, high hit
   * rate, and ability to be implemented with O(1) time complexity. The initial LRU implementation
   * operates per-segment rather than globally for increased implementation simplicity. We expect
   * the cache hit rate to be similar to that of a global LRU algorithm.
   */
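
  /*
   * Illustrative sketch of the batching described above (the names readCount and recencyQueue
   * refer to Segment state defined later in this class; treat the exact trigger as an assumption
   * of this note rather than a specification): a read appends the touched entry to the segment's
   * unsynchronized recency queue, and only when roughly (readCount & DRAIN_THRESHOLD) == 0, or
   * when a write occurs, does the segment take its lock and replay the queued reads onto the LRU
   * access queue in one batch.
   */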

  // Constants

  /**
   * The maximum capacity, used if a higher value is implicitly specified by either of the
   * constructors with arguments. MUST be a power of two {@code <= 1<<30} to ensure that entries
   * are indexable using ints.
   */
  static final int MAXIMUM_CAPACITY = 1 << 30;

  /** The maximum number of segments to allow; used to bound constructor arguments. */
  static final int MAX_SEGMENTS = 1 << 16; // slightly conservative

  /** Number of (unsynchronized) retries in the containsValue method. */
  static final int CONTAINS_VALUE_RETRIES = 3;

  /**
   * Number of cache access operations that can be buffered per segment before the cache's recency
   * ordering information is updated. This is used to avoid lock contention by recording a memento
   * of reads and delaying a lock acquisition until the threshold is crossed or a mutation occurs.
   *
   * <p>This must be a (2^n)-1 as it is used as a mask.
   */
  static final int DRAIN_THRESHOLD = 0x3F;

  /**
   * Maximum number of entries to be drained in a single cleanup run. This applies independently to
   * the cleanup queue and both reference queues.
   */
  // TODO(fry): empirically optimize this
  static final int DRAIN_MAX = 16;

  // Fields

  static final Logger logger = Logger.getLogger(LocalCache.class.getName());

  /**
   * Mask value for indexing into segments. The upper bits of a key's hash code are used to choose
   * the segment.
   */
  final int segmentMask;

  /**
   * Shift value for indexing within segments. Helps prevent entries that end up in the same
   * segment from also ending up in the same bucket.
   */
  final int segmentShift;

  /** The segments, each of which is a specialized hash table. */
  final Segment<K, V>[] segments;

  /** The concurrency level. */
  final int concurrencyLevel;

  /** Strategy for comparing keys. */
  final Equivalence<Object> keyEquivalence;

  /** Strategy for comparing values. */
  final Equivalence<Object> valueEquivalence;

  /** Strategy for referencing keys. */
  final Strength keyStrength;

  /** Strategy for referencing values. */
  final Strength valueStrength;

  /** The maximum weight of this map. UNSET_INT if there is no maximum. */
  final long maxWeight;

  /** Weigher to weigh cache entries. */
  final Weigher<K, V> weigher;

  /** How long after the last access to an entry the map will retain that entry. */
  final long expireAfterAccessNanos;

  /** How long after the last write to an entry the map will retain that entry. */
  final long expireAfterWriteNanos;

  /** How long after the last write an entry becomes a candidate for refresh. */
  final long refreshNanos;

  /** Entries waiting to be consumed by the removal listener. */
  // TODO(fry): define a new type which creates event objects and automates the clear logic
  final Queue<RemovalNotification<K, V>> removalNotificationQueue;

  /**
   * A listener that is invoked when an entry is removed due to expiration or garbage collection of
   * soft/weak entries.
   */
  final RemovalListener<K, V> removalListener;

  /** Measures time in a testable way. */
  final Ticker ticker;

  /** Factory used to create new entries. */
  final EntryFactory entryFactory;

  /**
   * Accumulates global cache statistics. Note that there are also per-segment stats counters which
   * must be aggregated to obtain a global stats view.
   */
  final StatsCounter globalStatsCounter;

  /** The default cache loader to use on loading operations. */
  @CheckForNull final CacheLoader<? super K, V> defaultLoader;

  /**
   * Creates a new, empty map with the specified strategy, initial capacity and concurrency level.
   */
  LocalCache(
      CacheBuilder<? super K, ? super V> builder, @CheckForNull CacheLoader<? super K, V> loader) {
    concurrencyLevel = Math.min(builder.getConcurrencyLevel(), MAX_SEGMENTS);

    keyStrength = builder.getKeyStrength();
    valueStrength = builder.getValueStrength();

    keyEquivalence = builder.getKeyEquivalence();
    valueEquivalence = builder.getValueEquivalence();

    maxWeight = builder.getMaximumWeight();
    weigher = builder.getWeigher();
    expireAfterAccessNanos = builder.getExpireAfterAccessNanos();
    expireAfterWriteNanos = builder.getExpireAfterWriteNanos();
    refreshNanos = builder.getRefreshNanos();

    removalListener = builder.getRemovalListener();
    removalNotificationQueue =
        (removalListener == NullListener.INSTANCE)
            ? LocalCache.discardingQueue()
            : new ConcurrentLinkedQueue<>();

    ticker = builder.getTicker(recordsTime());
    entryFactory = EntryFactory.getFactory(keyStrength, usesAccessEntries(), usesWriteEntries());
    globalStatsCounter = builder.getStatsCounterSupplier().get();
    defaultLoader = loader;

    int initialCapacity = Math.min(builder.getInitialCapacity(), MAXIMUM_CAPACITY);
    if (evictsBySize() && !customWeigher()) {
      initialCapacity = (int) Math.min(initialCapacity, maxWeight);
    }

    // Find the lowest power-of-two segmentCount that exceeds concurrencyLevel, unless
    // maximumSize/Weight is specified in which case ensure that each segment gets at least 10
    // entries. The special casing for size-based eviction is only necessary because that eviction
    // happens per segment instead of globally, so too many segments compared to the maximum size
    // will result in random eviction behavior.
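    // Worked example (illustrative only): with concurrencyLevel = 4 and no size bound, the loop
    // below doubles segmentCount 1 -> 2 -> 4 while incrementing the local segmentShift to 2, so
    // this.segmentShift becomes 30 and segmentMask becomes 3; segmentFor() then selects a segment
    // from the top two bits of the rehashed key hash.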
    int segmentShift = 0;
    int segmentCount = 1;
    while (segmentCount < concurrencyLevel
        && (!evictsBySize() || segmentCount * 20L <= maxWeight)) {
      ++segmentShift;
      segmentCount <<= 1;
    }
    this.segmentShift = 32 - segmentShift;
    segmentMask = segmentCount - 1;

    this.segments = newSegmentArray(segmentCount);

    int segmentCapacity = initialCapacity / segmentCount;
    if (segmentCapacity * segmentCount < initialCapacity) {
      ++segmentCapacity;
    }

    int segmentSize = 1;
    while (segmentSize < segmentCapacity) {
      segmentSize <<= 1;
    }

    if (evictsBySize()) {
      // Ensure sum of segment max weights = overall max weights
      long maxSegmentWeight = maxWeight / segmentCount + 1;
      long remainder = maxWeight % segmentCount;
      for (int i = 0; i < this.segments.length; ++i) {
        if (i == remainder) {
          maxSegmentWeight--;
        }
        this.segments[i] =
            createSegment(segmentSize, maxSegmentWeight, builder.getStatsCounterSupplier().get());
      }
    } else {
      for (int i = 0; i < this.segments.length; ++i) {
        this.segments[i] =
            createSegment(segmentSize, UNSET_INT, builder.getStatsCounterSupplier().get());
      }
    }
  }

  boolean evictsBySize() {
    return maxWeight >= 0;
  }

  boolean customWeigher() {
    return weigher != OneWeigher.INSTANCE;
  }

  boolean expires() {
    return expiresAfterWrite() || expiresAfterAccess();
  }

  boolean expiresAfterWrite() {
    return expireAfterWriteNanos > 0;
  }

  boolean expiresAfterAccess() {
    return expireAfterAccessNanos > 0;
  }

  boolean refreshes() {
    return refreshNanos > 0;
  }

  boolean usesAccessQueue() {
    return expiresAfterAccess() || evictsBySize();
  }

  boolean usesWriteQueue() {
    return expiresAfterWrite();
  }

  boolean recordsWrite() {
    return expiresAfterWrite() || refreshes();
  }

  boolean recordsAccess() {
    return expiresAfterAccess();
  }

  boolean recordsTime() {
    return recordsWrite() || recordsAccess();
  }

  boolean usesWriteEntries() {
    return usesWriteQueue() || recordsWrite();
  }

  boolean usesAccessEntries() {
    return usesAccessQueue() || recordsAccess();
  }

  boolean usesKeyReferences() {
    return keyStrength != Strength.STRONG;
  }

  boolean usesValueReferences() {
    return valueStrength != Strength.STRONG;
  }
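
  /*
   * Illustrative reading of the predicates above (a sketch, not an exhaustive matrix): a builder
   * configured with maximumSize()/maximumWeight() makes evictsBySize() true, which enables
   * usesAccessQueue() and hence access-ordered entries; expireAfterWrite() enables both
   * usesWriteQueue() and recordsWrite(); refreshAfterWrite() enables recordsWrite() only, so
   * entries record write times without being queued for write-order expiration.
   */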

  enum Strength {
    /*
     * TODO(kevinb): If we strongly reference the value and aren't loading, we needn't wrap the
     * value. This could save ~8 bytes per entry.
     */

    STRONG {
      @Override
      <K, V> ValueReference<K, V> referenceValue(
          Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) {
        return (weight == 1)
            ? new StrongValueReference<K, V>(value)
            : new WeightedStrongValueReference<K, V>(value, weight);
      }

      @Override
      Equivalence<Object> defaultEquivalence() {
        return Equivalence.equals();
      }
    },
    SOFT {
      @Override
      <K, V> ValueReference<K, V> referenceValue(
          Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) {
        return (weight == 1)
            ? new SoftValueReference<K, V>(segment.valueReferenceQueue, value, entry)
            : new WeightedSoftValueReference<K, V>(
                segment.valueReferenceQueue, value, entry, weight);
      }

      @Override
      Equivalence<Object> defaultEquivalence() {
        return Equivalence.identity();
      }
    },
    WEAK {
      @Override
      <K, V> ValueReference<K, V> referenceValue(
          Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) {
        return (weight == 1)
            ? new WeakValueReference<K, V>(segment.valueReferenceQueue, value, entry)
            : new WeightedWeakValueReference<K, V>(
                segment.valueReferenceQueue, value, entry, weight);
      }

      @Override
      Equivalence<Object> defaultEquivalence() {
        return Equivalence.identity();
      }
    };

    /** Creates a reference for the given value according to this value strength. */
    abstract <K, V> ValueReference<K, V> referenceValue(
        Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight);

    /**
     * Returns the default equivalence strategy used to compare and hash keys or values referenced
     * at this strength. This strategy will be used unless the user explicitly specifies an
     * alternate strategy.
     */
    abstract Equivalence<Object> defaultEquivalence();
  }
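
  /*
   * For example (illustrative): with valueStrength == SOFT and the default OneWeigher, every value
   * has weight 1 and referenceValue() produces a plain SoftValueReference; with a custom weigher
   * reporting a weight greater than 1 it produces a WeightedSoftValueReference, so the extra
   * weight survives copyFor() when entries are copied during table maintenance.
   */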

  /** Creates new entries. */
  enum EntryFactory {
    STRONG {
      @Override
      <K, V> ReferenceEntry<K, V> newEntry(
          Segment<K, V> segment, K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
        return new StrongEntry<>(key, hash, next);
      }
    },
    STRONG_ACCESS {
      @Override
      <K, V> ReferenceEntry<K, V> newEntry(
          Segment<K, V> segment, K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
        return new StrongAccessEntry<>(key, hash, next);
      }

      @Override
      <K, V> ReferenceEntry<K, V> copyEntry(
          Segment<K, V> segment,
          ReferenceEntry<K, V> original,
          ReferenceEntry<K, V> newNext,
          K key) {
        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext, key);
        copyAccessEntry(original, newEntry);
        return newEntry;
      }
    },
    STRONG_WRITE {
      @Override
      <K, V> ReferenceEntry<K, V> newEntry(
          Segment<K, V> segment, K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
        return new StrongWriteEntry<>(key, hash, next);
      }

      @Override
      <K, V> ReferenceEntry<K, V> copyEntry(
          Segment<K, V> segment,
          ReferenceEntry<K, V> original,
          ReferenceEntry<K, V> newNext,
          K key) {
        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext, key);
        copyWriteEntry(original, newEntry);
        return newEntry;
      }
    },
    STRONG_ACCESS_WRITE {
      @Override
      <K, V> ReferenceEntry<K, V> newEntry(
          Segment<K, V> segment, K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
        return new StrongAccessWriteEntry<>(key, hash, next);
      }

      @Override
      <K, V> ReferenceEntry<K, V> copyEntry(
          Segment<K, V> segment,
          ReferenceEntry<K, V> original,
          ReferenceEntry<K, V> newNext,
          K key) {
        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext, key);
        copyAccessEntry(original, newEntry);
        copyWriteEntry(original, newEntry);
        return newEntry;
      }
    },
    WEAK {
      @Override
      <K, V> ReferenceEntry<K, V> newEntry(
          Segment<K, V> segment, K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
        return new WeakEntry<>(segment.keyReferenceQueue, key, hash, next);
      }
    },
    WEAK_ACCESS {
      @Override
      <K, V> ReferenceEntry<K, V> newEntry(
          Segment<K, V> segment, K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
        return new WeakAccessEntry<>(segment.keyReferenceQueue, key, hash, next);
      }

      @Override
      <K, V> ReferenceEntry<K, V> copyEntry(
          Segment<K, V> segment,
          ReferenceEntry<K, V> original,
          ReferenceEntry<K, V> newNext,
          K key) {
        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext, key);
        copyAccessEntry(original, newEntry);
        return newEntry;
      }
    },
    WEAK_WRITE {
      @Override
      <K, V> ReferenceEntry<K, V> newEntry(
          Segment<K, V> segment, K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
        return new WeakWriteEntry<>(segment.keyReferenceQueue, key, hash, next);
      }

      @Override
      <K, V> ReferenceEntry<K, V> copyEntry(
          Segment<K, V> segment,
          ReferenceEntry<K, V> original,
          ReferenceEntry<K, V> newNext,
          K key) {
        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext, key);
        copyWriteEntry(original, newEntry);
        return newEntry;
      }
    },
    WEAK_ACCESS_WRITE {
      @Override
      <K, V> ReferenceEntry<K, V> newEntry(
          Segment<K, V> segment, K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
        return new WeakAccessWriteEntry<>(segment.keyReferenceQueue, key, hash, next);
      }

      @Override
      <K, V> ReferenceEntry<K, V> copyEntry(
          Segment<K, V> segment,
          ReferenceEntry<K, V> original,
          ReferenceEntry<K, V> newNext,
          K key) {
        ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext, key);
        copyAccessEntry(original, newEntry);
        copyWriteEntry(original, newEntry);
        return newEntry;
      }
    };

    // Masks used to compute indices in the following table.

    static final int ACCESS_MASK = 1;
    static final int WRITE_MASK = 2;
    static final int WEAK_MASK = 4;

    /** Look-up table for factories. */
    static final EntryFactory[] factories = {
      STRONG,
      STRONG_ACCESS,
      STRONG_WRITE,
      STRONG_ACCESS_WRITE,
      WEAK,
      WEAK_ACCESS,
      WEAK_WRITE,
      WEAK_ACCESS_WRITE,
    };

    static EntryFactory getFactory(
        Strength keyStrength, boolean usesAccessQueue, boolean usesWriteQueue) {
      int flags =
          ((keyStrength == Strength.WEAK) ? WEAK_MASK : 0)
              | (usesAccessQueue ? ACCESS_MASK : 0)
              | (usesWriteQueue ? WRITE_MASK : 0);
      return factories[flags];
    }
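
    // For example (illustrative): weak keys combined with access ordering but no write ordering
    // yields flags = WEAK_MASK | ACCESS_MASK = 5, and factories[5] is WEAK_ACCESS -- which is why
    // the array above must list the constants in exactly this bit order.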

    /**
     * Creates a new entry.
     *
     * @param segment to create the entry for
     * @param key of the entry
     * @param hash of the key
     * @param next entry in the same bucket
     */
    abstract <K, V> ReferenceEntry<K, V> newEntry(
        Segment<K, V> segment, K key, int hash, @CheckForNull ReferenceEntry<K, V> next);

    /**
     * Copies an entry, assigning it a new {@code next} entry.
     *
     * @param original the entry to copy. But avoid calling {@code getKey} on it: Instead, use the
     *     {@code key} parameter. That way, we prevent the key from being garbage collected in the
     *     case of weak keys. If we create a new entry with a key that is null at construction
     *     time, we're not sure if the entry will necessarily ever be garbage collected.
     * @param newNext entry in the same bucket
     * @param key the key to copy from the original entry to the new one. Use this in preference to
     *     {@code original.getKey()}.
     */
    // Guarded By Segment.this
    <K, V> ReferenceEntry<K, V> copyEntry(
        Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext, K key) {
      return newEntry(segment, key, original.getHash(), newNext);
    }

    // Guarded By Segment.this
    <K, V> void copyAccessEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry) {
      // TODO(fry): when we link values instead of entries this method can go
      // away, as can connectAccessOrder, nullifyAccessOrder.
      newEntry.setAccessTime(original.getAccessTime());

      connectAccessOrder(original.getPreviousInAccessQueue(), newEntry);
      connectAccessOrder(newEntry, original.getNextInAccessQueue());

      nullifyAccessOrder(original);
    }

    // Guarded By Segment.this
    <K, V> void copyWriteEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry) {
      // TODO(fry): when we link values instead of entries this method can go
      // away, as can connectWriteOrder, nullifyWriteOrder.
      newEntry.setWriteTime(original.getWriteTime());

      connectWriteOrder(original.getPreviousInWriteQueue(), newEntry);
      connectWriteOrder(newEntry, original.getNextInWriteQueue());

      nullifyWriteOrder(original);
    }
  }

  /** A reference to a value. */
  interface ValueReference<K, V> {
    /** Returns the value. Does not block or throw exceptions. */
    @CheckForNull
    V get();

    /**
     * Waits for a value that may still be loading. Unlike get(), this method can block (in the
     * case of FutureValueReference).
     *
     * @throws ExecutionException if the loading thread throws an exception
     * @throws ExecutionError if the loading thread throws an error
     */
    V waitForValue() throws ExecutionException;

    /** Returns the weight of this entry. This is assumed to be static between calls to setValue. */
    int getWeight();

    /**
     * Returns the entry associated with this value reference, or {@code null} if this value
     * reference is independent of any entry.
     */
    @CheckForNull
    ReferenceEntry<K, V> getEntry();

    /**
     * Creates a copy of this reference for the given entry.
     *
     * <p>{@code value} may be null only for a loading reference.
     */
    ValueReference<K, V> copyFor(
        ReferenceQueue<V> queue, @CheckForNull V value, ReferenceEntry<K, V> entry);

    /**
     * Notify pending loads that a new value was set. This is only relevant to loading value
     * references.
     */
    void notifyNewValue(@CheckForNull V newValue);

    /**
     * Returns true if a new value is currently loading, regardless of whether there is an existing
     * value. It is assumed that the return value of this method is constant for any given
     * ValueReference instance.
     */
    boolean isLoading();

    /**
     * Returns true if this reference contains an active value, meaning one that is still
     * considered present in the cache.
     * Active values consist of live values, which are returned by cache lookups, and dead values,
     * which have been evicted but are still awaiting removal. Non-active values consist strictly
     * of loading values, though during refresh a value may be both active and loading.
     */
    boolean isActive();
  }

  /** Placeholder. Indicates that the value hasn't been set yet. */
  static final ValueReference<Object, Object> UNSET =
      new ValueReference<Object, Object>() {
        @CheckForNull
        @Override
        public Object get() {
          return null;
        }

        @Override
        public int getWeight() {
          return 0;
        }

        @CheckForNull
        @Override
        public ReferenceEntry<Object, Object> getEntry() {
          return null;
        }

        @Override
        public ValueReference<Object, Object> copyFor(
            ReferenceQueue<Object> queue,
            @CheckForNull Object value,
            ReferenceEntry<Object, Object> entry) {
          return this;
        }

        @Override
        public boolean isLoading() {
          return false;
        }

        @Override
        public boolean isActive() {
          return false;
        }

        @CheckForNull
        @Override
        public Object waitForValue() {
          return null;
        }

        @Override
        public void notifyNewValue(Object newValue) {}
      };

  /** Returns the singleton placeholder that indicates the value hasn't been set yet. */
  @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
  static <K, V> ValueReference<K, V> unset() {
    return (ValueReference<K, V>) UNSET;
  }

  private enum NullEntry implements ReferenceEntry<Object, Object> {
    INSTANCE;

    @CheckForNull
    @Override
    public ValueReference<Object, Object> getValueReference() {
      return null;
    }

    @Override
    public void setValueReference(ValueReference<Object, Object> valueReference) {}

    @CheckForNull
    @Override
    public ReferenceEntry<Object, Object> getNext() {
      return null;
    }

    @Override
    public int getHash() {
      return 0;
    }

    @CheckForNull
    @Override
    public Object getKey() {
      return null;
    }

    @Override
    public long getAccessTime() {
      return 0;
    }

    @Override
    public void setAccessTime(long time) {}

    @Override
    public ReferenceEntry<Object, Object> getNextInAccessQueue() {
      return this;
    }

    @Override
    public void setNextInAccessQueue(ReferenceEntry<Object, Object> next) {}

    @Override
    public ReferenceEntry<Object, Object> getPreviousInAccessQueue() {
      return this;
    }

    @Override
    public void setPreviousInAccessQueue(ReferenceEntry<Object, Object> previous) {}

    @Override
    public long getWriteTime() {
      return 0;
    }

    @Override
    public void setWriteTime(long time) {}

    @Override
    public ReferenceEntry<Object, Object> getNextInWriteQueue() {
      return this;
    }

    @Override
    public void setNextInWriteQueue(ReferenceEntry<Object, Object> next) {}

    @Override
    public ReferenceEntry<Object, Object> getPreviousInWriteQueue() {
      return this;
    }

    @Override
    public void setPreviousInWriteQueue(ReferenceEntry<Object, Object> previous) {}
  }

  abstract static class AbstractReferenceEntry<K, V> implements ReferenceEntry<K, V> {
    @Override
    public ValueReference<K, V> getValueReference() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setValueReference(ValueReference<K, V> valueReference) {
      throw new UnsupportedOperationException();
    }

    @Override
    public ReferenceEntry<K, V> getNext() {
      throw new UnsupportedOperationException();
    }

    @Override
    public int getHash() {
      throw new UnsupportedOperationException();
    }

    @Override
    public K getKey() {
      throw new UnsupportedOperationException();
    }

    @Override
    public long getAccessTime() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setAccessTime(long time) {
      throw new UnsupportedOperationException();
    }

    @Override
    public ReferenceEntry<K, V> getNextInAccessQueue() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
      throw new UnsupportedOperationException();
    }

    @Override
    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
      throw new UnsupportedOperationException();
    }

    @Override
    public long getWriteTime() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setWriteTime(long time) {
      throw new UnsupportedOperationException();
    }

    @Override
    public ReferenceEntry<K, V> getNextInWriteQueue() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
      throw new UnsupportedOperationException();
    }

    @Override
    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
      throw new UnsupportedOperationException();
    }
  }

  @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
  static <K, V> ReferenceEntry<K, V> nullEntry() {
    return (ReferenceEntry<K, V>) NullEntry.INSTANCE;
  }

  static final Queue<?> DISCARDING_QUEUE =
      new AbstractQueue<Object>() {
        @Override
        public boolean offer(Object o) {
          return true;
        }

        @CheckForNull
        @Override
        public Object peek() {
          return null;
        }

        @CheckForNull
        @Override
        public Object poll() {
          return null;
        }

        @Override
        public int size() {
          return 0;
        }

        @Override
        public Iterator<Object> iterator() {
          return ImmutableSet.of().iterator();
        }
      };

  /** Queue that discards all elements. */
  @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
  static <E> Queue<E> discardingQueue() {
    return (Queue) DISCARDING_QUEUE;
  }

  /*
   * Note: All of this duplicate code sucks, but it saves a lot of memory. If only Java had mixins!
   * To maintain this code, make a change for the strong reference type. Then, cut and paste, and
   * replace "Strong" with "Soft" or "Weak" within the pasted text. The primary difference is that
   * strong entries store the key reference directly while soft and weak entries delegate to their
   * respective superclasses.
   */

  /** Used for strongly-referenced keys. */
  static class StrongEntry<K, V> extends AbstractReferenceEntry<K, V> {
    final K key;

    StrongEntry(K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
      this.key = key;
      this.hash = hash;
      this.next = next;
    }

    @Override
    public K getKey() {
      return this.key;
    }

    // The code below is exactly the same for each entry type.

    final int hash;
    @CheckForNull final ReferenceEntry<K, V> next;
    volatile ValueReference<K, V> valueReference = unset();

    @Override
    public ValueReference<K, V> getValueReference() {
      return valueReference;
    }

    @Override
    public void setValueReference(ValueReference<K, V> valueReference) {
      this.valueReference = valueReference;
    }

    @Override
    public int getHash() {
      return hash;
    }

    @Override
    public ReferenceEntry<K, V> getNext() {
      return next;
    }
  }

  static final class StrongAccessEntry<K, V> extends StrongEntry<K, V> {
    StrongAccessEntry(K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
      super(key, hash, next);
    }

    // The code below is exactly the same for each access entry type.

    volatile long accessTime = Long.MAX_VALUE;

    @Override
    public long getAccessTime() {
      return accessTime;
    }

    @Override
    public void setAccessTime(long time) {
      this.accessTime = time;
    }

    // Guarded By Segment.this
    @Weak ReferenceEntry<K, V> nextAccess = nullEntry();

    @Override
    public ReferenceEntry<K, V> getNextInAccessQueue() {
      return nextAccess;
    }

    @Override
    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
      this.nextAccess = next;
    }

    // Guarded By Segment.this
    @Weak ReferenceEntry<K, V> previousAccess = nullEntry();

    @Override
    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
      return previousAccess;
    }

    @Override
    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
      this.previousAccess = previous;
    }
  }

  static final class StrongWriteEntry<K, V> extends StrongEntry<K, V> {
    StrongWriteEntry(K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
      super(key, hash, next);
    }

    // The code below is exactly the same for each write entry type.

    volatile long writeTime = Long.MAX_VALUE;

    @Override
    public long getWriteTime() {
      return writeTime;
    }

    @Override
    public void setWriteTime(long time) {
      this.writeTime = time;
    }

    // Guarded By Segment.this
    @Weak ReferenceEntry<K, V> nextWrite = nullEntry();

    @Override
    public ReferenceEntry<K, V> getNextInWriteQueue() {
      return nextWrite;
    }

    @Override
    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
      this.nextWrite = next;
    }

    // Guarded By Segment.this
    @Weak ReferenceEntry<K, V> previousWrite = nullEntry();

    @Override
    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
      return previousWrite;
    }

    @Override
    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
      this.previousWrite = previous;
    }
  }

  static final class StrongAccessWriteEntry<K, V> extends StrongEntry<K, V> {
    StrongAccessWriteEntry(K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
      super(key, hash, next);
    }

    // The code below is exactly the same for each access entry type.

    volatile long accessTime = Long.MAX_VALUE;

    @Override
    public long getAccessTime() {
      return accessTime;
    }

    @Override
    public void setAccessTime(long time) {
      this.accessTime = time;
    }

    // Guarded By Segment.this
    @Weak ReferenceEntry<K, V> nextAccess = nullEntry();

    @Override
    public ReferenceEntry<K, V> getNextInAccessQueue() {
      return nextAccess;
    }

    @Override
    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
      this.nextAccess = next;
    }

    // Guarded By Segment.this
    @Weak ReferenceEntry<K, V> previousAccess = nullEntry();

    @Override
    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
      return previousAccess;
    }

    @Override
    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
      this.previousAccess = previous;
    }

    // The code below is exactly the same for each write entry type.

    volatile long writeTime = Long.MAX_VALUE;

    @Override
    public long getWriteTime() {
      return writeTime;
    }

    @Override
    public void setWriteTime(long time) {
      this.writeTime = time;
    }

    // Guarded By Segment.this
    @Weak ReferenceEntry<K, V> nextWrite = nullEntry();

    @Override
    public ReferenceEntry<K, V> getNextInWriteQueue() {
      return nextWrite;
    }

    @Override
    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
      this.nextWrite = next;
    }

    // Guarded By Segment.this
    @Weak ReferenceEntry<K, V> previousWrite = nullEntry();

    @Override
    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
      return previousWrite;
    }

    @Override
    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
      this.previousWrite = previous;
    }
  }

  /** Used for weakly-referenced keys. */
  static class WeakEntry<K, V> extends WeakReference<K> implements ReferenceEntry<K, V> {
    WeakEntry(ReferenceQueue<K> queue, K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
      super(key, queue);
      this.hash = hash;
      this.next = next;
    }

    @Override
    public K getKey() {
      return get();
    }

    /*
     * It'd be nice to get these for free from AbstractReferenceEntry, but we're already extending
     * WeakReference<K>.
     */

    // null access

    @Override
    public long getAccessTime() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setAccessTime(long time) {
      throw new UnsupportedOperationException();
    }

    @Override
    public ReferenceEntry<K, V> getNextInAccessQueue() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
      throw new UnsupportedOperationException();
    }

    @Override
    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
      throw new UnsupportedOperationException();
    }

    // null write

    @Override
    public long getWriteTime() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setWriteTime(long time) {
      throw new UnsupportedOperationException();
    }

    @Override
    public ReferenceEntry<K, V> getNextInWriteQueue() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
      throw new UnsupportedOperationException();
    }

    @Override
    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
      throw new UnsupportedOperationException();
    }

    // The code below is exactly the same for each entry type.

    final int hash;
    @CheckForNull final ReferenceEntry<K, V> next;
    volatile ValueReference<K, V> valueReference = unset();

    @Override
    public ValueReference<K, V> getValueReference() {
      return valueReference;
    }

    @Override
    public void setValueReference(ValueReference<K, V> valueReference) {
      this.valueReference = valueReference;
    }

    @Override
    public int getHash() {
      return hash;
    }

    @Override
    public ReferenceEntry<K, V> getNext() {
      return next;
    }
  }

  static final class WeakAccessEntry<K, V> extends WeakEntry<K, V> {
    WeakAccessEntry(
        ReferenceQueue<K> queue, K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
      super(queue, key, hash, next);
    }

    // The code below is exactly the same for each access entry type.

    volatile long accessTime = Long.MAX_VALUE;

    @Override
    public long getAccessTime() {
      return accessTime;
    }

    @Override
    public void setAccessTime(long time) {
      this.accessTime = time;
    }

    // Guarded By Segment.this
    @Weak ReferenceEntry<K, V> nextAccess = nullEntry();

    @Override
    public ReferenceEntry<K, V> getNextInAccessQueue() {
      return nextAccess;
    }

    @Override
    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
      this.nextAccess = next;
    }

    // Guarded By Segment.this
    @Weak ReferenceEntry<K, V> previousAccess = nullEntry();

    @Override
    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
      return previousAccess;
    }

    @Override
    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
      this.previousAccess = previous;
    }
  }

  static final class WeakWriteEntry<K, V> extends WeakEntry<K, V> {
    WeakWriteEntry(
        ReferenceQueue<K> queue, K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
      super(queue, key, hash, next);
    }

    // The code below is exactly the same for each write entry type.

    volatile long writeTime = Long.MAX_VALUE;

    @Override
    public long getWriteTime() {
      return writeTime;
    }

    @Override
    public void setWriteTime(long time) {
      this.writeTime = time;
    }

    // Guarded By Segment.this
    @Weak ReferenceEntry<K, V> nextWrite = nullEntry();

    @Override
    public ReferenceEntry<K, V> getNextInWriteQueue() {
      return nextWrite;
    }

    @Override
    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
      this.nextWrite = next;
    }

    // Guarded By Segment.this
    @Weak ReferenceEntry<K, V> previousWrite = nullEntry();

    @Override
    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
      return previousWrite;
    }

    @Override
    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
      this.previousWrite = previous;
    }
  }

  static final class WeakAccessWriteEntry<K, V> extends WeakEntry<K, V> {
    WeakAccessWriteEntry(
        ReferenceQueue<K> queue, K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
      super(queue, key, hash, next);
    }

    // The code below is exactly the same for each access entry type.

    volatile long accessTime = Long.MAX_VALUE;

    @Override
    public long getAccessTime() {
      return accessTime;
    }

    @Override
    public void setAccessTime(long time) {
      this.accessTime = time;
    }

    // Guarded By Segment.this
    @Weak ReferenceEntry<K, V> nextAccess = nullEntry();

    @Override
    public ReferenceEntry<K, V> getNextInAccessQueue() {
      return nextAccess;
    }

    @Override
    public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
      this.nextAccess = next;
    }

    // Guarded By Segment.this
    @Weak ReferenceEntry<K, V> previousAccess = nullEntry();

    @Override
    public ReferenceEntry<K, V> getPreviousInAccessQueue() {
      return previousAccess;
    }

    @Override
    public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
      this.previousAccess = previous;
    }

    // The code below is exactly the same for each write entry type.

    volatile long writeTime = Long.MAX_VALUE;

    @Override
    public long getWriteTime() {
      return writeTime;
    }

    @Override
    public void setWriteTime(long time) {
      this.writeTime = time;
    }

    // Guarded By Segment.this
    @Weak ReferenceEntry<K, V> nextWrite = nullEntry();

    @Override
    public ReferenceEntry<K, V> getNextInWriteQueue() {
      return nextWrite;
    }

    @Override
    public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
      this.nextWrite = next;
    }

    // Guarded By Segment.this
    @Weak ReferenceEntry<K, V> previousWrite = nullEntry();

    @Override
    public ReferenceEntry<K, V> getPreviousInWriteQueue() {
      return previousWrite;
    }

    @Override
    public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
      this.previousWrite = previous;
    }
  }

  /** References a weak value. */
  static class WeakValueReference<K, V> extends WeakReference<V> implements ValueReference<K, V> {
    final ReferenceEntry<K, V> entry;

    WeakValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry) {
      super(referent, queue);
      this.entry = entry;
    }

    @Override
    public int getWeight() {
      return 1;
    }

    @Override
    public ReferenceEntry<K, V> getEntry() {
      return entry;
    }

    @Override
    public void notifyNewValue(V newValue) {}

    @Override
    public ValueReference<K, V> copyFor(
        ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) {
      return new WeakValueReference<>(queue, value, entry);
    }

    @Override
    public boolean isLoading() {
      return false;
    }

    @Override
    public boolean isActive() {
      return true;
    }

    @Override
    public V waitForValue() {
      return get();
    }
  }

  /** References a soft value. */
  static class SoftValueReference<K, V> extends SoftReference<V> implements ValueReference<K, V> {
    final ReferenceEntry<K, V> entry;

    SoftValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry) {
      super(referent, queue);
      this.entry = entry;
    }

    @Override
    public int getWeight() {
      return 1;
    }

    @Override
    public ReferenceEntry<K, V> getEntry() {
      return entry;
    }

    @Override
    public void notifyNewValue(V newValue) {}

    @Override
    public ValueReference<K, V> copyFor(
        ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) {
      return new SoftValueReference<>(queue, value, entry);
    }

    @Override
    public boolean isLoading() {
      return false;
    }

    @Override
    public boolean isActive() {
      return true;
    }

    @Override
    public V waitForValue() {
      return get();
    }
  }

  /** References a strong value. */
  static class StrongValueReference<K, V> implements ValueReference<K, V> {
    final V referent;

    StrongValueReference(V referent) {
      this.referent = referent;
    }

    @Override
    public V get() {
      return referent;
    }

    @Override
    public int getWeight() {
      return 1;
    }

    @Override
    public ReferenceEntry<K, V> getEntry() {
      return null;
    }

    @Override
    public ValueReference<K, V> copyFor(
        ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) {
      return this;
    }

    @Override
    public boolean isLoading() {
      return false;
    }

    @Override
    public boolean isActive() {
      return true;
    }

    @Override
    public V waitForValue() {
      return get();
    }

    @Override
    public void notifyNewValue(V newValue) {}
  }

  /** References a weak value. */
  static final class WeightedWeakValueReference<K, V> extends WeakValueReference<K, V> {
    final int weight;

    WeightedWeakValueReference(
        ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry, int weight) {
      super(queue, referent, entry);
      this.weight = weight;
    }

    @Override
    public int getWeight() {
      return weight;
    }

    @Override
    public ValueReference<K, V> copyFor(
        ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) {
      return new WeightedWeakValueReference<>(queue, value, entry, weight);
    }
  }

  /** References a soft value. */
  static final class WeightedSoftValueReference<K, V> extends SoftValueReference<K, V> {
    final int weight;

    WeightedSoftValueReference(
        ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry, int weight) {
      super(queue, referent, entry);
      this.weight = weight;
    }

    @Override
    public int getWeight() {
      return weight;
    }

    @Override
    public ValueReference<K, V> copyFor(
        ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) {
      return new WeightedSoftValueReference<>(queue, value, entry, weight);
    }
  }

  /** References a strong value. */
  static final class WeightedStrongValueReference<K, V> extends StrongValueReference<K, V> {
    final int weight;

    WeightedStrongValueReference(V referent, int weight) {
      super(referent);
      this.weight = weight;
    }

    @Override
    public int getWeight() {
      return weight;
    }
  }

  /**
   * Applies a supplemental hash function to a given hash code, which defends against poor quality
   * hash functions. This is critical when the concurrent hash map uses power-of-two length hash
   * tables that otherwise encounter collisions for hash codes that do not differ in lower or
   * upper bits.
   *
   * @param h hash code
   */
  static int rehash(int h) {
    // Spread bits to regularize both segment and index locations,
    // using variant of single-word Wang/Jenkins hash.
    // TODO(kevinb): use Hashing/move this to Hashing?
    h += (h << 15) ^ 0xffffcd7d;
    h ^= (h >>> 10);
    h += (h << 3);
    h ^= (h >>> 6);
    h += (h << 2) + (h << 14);
    return h ^ (h >>> 16);
  }
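
  // Why this matters (illustrative): small consecutive Integer keys hash to themselves, so their
  // upper bits are all zero; without rehash(), segmentFor() below would send every such key to
  // segment 0. Mixing the bits first spreads those keys across segments and across buckets.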
  /**
   * Applies a supplemental hash function to a given hash code, which defends against poor quality
   * hash functions. This is critical when the concurrent hash map uses power-of-two length hash
   * tables, that otherwise encounter collisions for hash codes that do not differ in lower or
   * upper bits.
   *
   * @param h hash code
   */
  static int rehash(int h) {
    // Spread bits to regularize both segment and index locations,
    // using variant of single-word Wang/Jenkins hash.
    // TODO(kevinb): use Hashing/move this to Hashing?
    h += (h << 15) ^ 0xffffcd7d;
    h ^= (h >>> 10);
    h += (h << 3);
    h ^= (h >>> 6);
    h += (h << 2) + (h << 14);
    return h ^ (h >>> 16);
  }

  /**
   * This method is a convenience for testing. Code should call {@link Segment#newEntry} directly.
   */
  @VisibleForTesting
  ReferenceEntry<K, V> newEntry(K key, int hash, @CheckForNull ReferenceEntry<K, V> next) {
    Segment<K, V> segment = segmentFor(hash);
    segment.lock();
    try {
      return segment.newEntry(key, hash, next);
    } finally {
      segment.unlock();
    }
  }

  /**
   * This method is a convenience for testing. Code should call {@link Segment#copyEntry} directly.
   */
  // Guarded By Segment.this
  @SuppressWarnings("GuardedBy")
  @VisibleForTesting
  ReferenceEntry<K, V> copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
    int hash = original.getHash();
    return segmentFor(hash).copyEntry(original, newNext);
  }

  /**
   * This method is a convenience for testing. Code should call {@link Segment#setValue} instead.
   */
  // Guarded By Segment.this
  @VisibleForTesting
  ValueReference<K, V> newValueReference(ReferenceEntry<K, V> entry, V value, int weight) {
    int hash = entry.getHash();
    return valueStrength.referenceValue(segmentFor(hash), entry, checkNotNull(value), weight);
  }

  int hash(@CheckForNull Object key) {
    int h = keyEquivalence.hash(key);
    return rehash(h);
  }

  void reclaimValue(ValueReference<K, V> valueReference) {
    ReferenceEntry<K, V> entry = valueReference.getEntry();
    int hash = entry.getHash();
    segmentFor(hash).reclaimValue(entry.getKey(), hash, valueReference);
  }

  void reclaimKey(ReferenceEntry<K, V> entry) {
    int hash = entry.getHash();
    segmentFor(hash).reclaimKey(entry, hash);
  }

  /**
   * This method is a convenience for testing. Code should call {@link Segment#getLiveValue}
   * instead.
   */
  @VisibleForTesting
  boolean isLive(ReferenceEntry<K, V> entry, long now) {
    return segmentFor(entry.getHash()).getLiveValue(entry, now) != null;
  }

  /**
   * Returns the segment that should be used for a key with the given hash.
   *
   * @param hash the hash code for the key
   * @return the segment
   */
  Segment<K, V> segmentFor(int hash) {
    // TODO(fry): Lazily create segments?
    return segments[(hash >>> segmentShift) & segmentMask];
  }

  Segment<K, V> createSegment(
      int initialCapacity, long maxSegmentWeight, StatsCounter statsCounter) {
    return new Segment<>(this, initialCapacity, maxSegmentWeight, statsCounter);
  }
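  /*
   * Illustrative sketch (not a method in this class): the routing that every read and write
   * performs, combining hash()/rehash() and segmentFor() above with the loading read path on
   * Segment.
   *
   *   int h = hash(key);                // keyEquivalence.hash(key), then rehash(h)
   *   Segment<K, V> s = segmentFor(h);  // segments[(h >>> segmentShift) & segmentMask]
   *   V value = s.get(key, h, defaultLoader);
   *
   * The upper bits of the rehashed code pick the segment and the lower bits pick the bucket
   * within that segment's table, which is why rehash() has to spread entropy into both halves
   * of the word.
   */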
  /**
   * Gets the value from an entry. Returns null if the entry is invalid, partially-collected,
   * loading, or expired. Unlike {@link Segment#getLiveValue} this method does not attempt to clean
   * up stale entries. As such it should only be called outside a segment context, such as during
   * iteration.
   */
  @CheckForNull
  V getLiveValue(ReferenceEntry<K, V> entry, long now) {
    if (entry.getKey() == null) {
      return null;
    }
    V value = entry.getValueReference().get();
    if (value == null) {
      return null;
    }

    if (isExpired(entry, now)) {
      return null;
    }
    return value;
  }

  // expiration

  /** Returns true if the entry has expired. */
  boolean isExpired(ReferenceEntry<K, V> entry, long now) {
    checkNotNull(entry);
    if (expiresAfterAccess() && (now - entry.getAccessTime() >= expireAfterAccessNanos)) {
      return true;
    }
    if (expiresAfterWrite() && (now - entry.getWriteTime() >= expireAfterWriteNanos)) {
      return true;
    }
    return false;
  }

  // queues

  // Guarded By Segment.this
  static <K, V> void connectAccessOrder(ReferenceEntry<K, V> previous, ReferenceEntry<K, V> next) {
    previous.setNextInAccessQueue(next);
    next.setPreviousInAccessQueue(previous);
  }

  // Guarded By Segment.this
  static <K, V> void nullifyAccessOrder(ReferenceEntry<K, V> nulled) {
    ReferenceEntry<K, V> nullEntry = nullEntry();
    nulled.setNextInAccessQueue(nullEntry);
    nulled.setPreviousInAccessQueue(nullEntry);
  }

  // Guarded By Segment.this
  static <K, V> void connectWriteOrder(ReferenceEntry<K, V> previous, ReferenceEntry<K, V> next) {
    previous.setNextInWriteQueue(next);
    next.setPreviousInWriteQueue(previous);
  }

  // Guarded By Segment.this
  static <K, V> void nullifyWriteOrder(ReferenceEntry<K, V> nulled) {
    ReferenceEntry<K, V> nullEntry = nullEntry();
    nulled.setNextInWriteQueue(nullEntry);
    nulled.setPreviousInWriteQueue(nullEntry);
  }

  /**
   * Notifies listeners that an entry has been automatically removed due to expiration, eviction,
   * or eligibility for garbage collection. This should be called every time expireEntries or
   * evictEntry is called (once the lock is released).
   */
  void processPendingNotifications() {
    RemovalNotification<K, V> notification;
    while ((notification = removalNotificationQueue.poll()) != null) {
      try {
        removalListener.onRemoval(notification);
      } catch (Throwable e) {
        logger.log(Level.WARNING, "Exception thrown by removal listener", e);
      }
    }
  }

  @SuppressWarnings("unchecked")
  final Segment<K, V>[] newSegmentArray(int ssize) {
    return new Segment[ssize];
  }

  // Inner Classes

  /**
   * Segments are specialized versions of hash tables. This subclass inherits from ReentrantLock
   * opportunistically, just to simplify some locking and avoid separate construction.
   */
  @SuppressWarnings("serial") // This class is never serialized.
1866 static class Segment<K, V> extends ReentrantLock { 1867 1868 /* 1869 * TODO(fry): Consider copying variables (like evictsBySize) from outer class into this class. 1870 * It will require more memory but will reduce indirection. 1871 */ 1872 1873 /* 1874 * Segments maintain a table of entry lists that are ALWAYS kept in a consistent state, so can 1875 * be read without locking. Next fields of nodes are immutable (final). All list additions are 1876 * performed at the front of each bin. This makes it easy to check changes, and also fast to 1877 * traverse. When nodes would otherwise be changed, new nodes are created to replace them. This 1878 * works well for hash tables since the bin lists tend to be short. (The average length is less 1879 * than two.) 1880 * 1881 * Read operations can thus proceed without locking, but rely on selected uses of volatiles to 1882 * ensure that completed write operations performed by other threads are noticed. For most 1883 * purposes, the "count" field, tracking the number of elements, serves as that volatile 1884 * variable ensuring visibility. This is convenient because this field needs to be read in many 1885 * read operations anyway: 1886 * 1887 * - All (unsynchronized) read operations must first read the "count" field, and should not look 1888 * at table entries if it is 0. 1889 * 1890 * - All (synchronized) write operations should write to the "count" field after structurally 1891 * changing any bin. The operations must not take any action that could even momentarily cause a 1892 * concurrent read operation to see inconsistent data. This is made easier by the nature of the 1893 * read operations in Map. For example, no operation can reveal that the table has grown but the 1894 * threshold has not yet been updated, so there are no atomicity requirements for this with 1895 * respect to reads. 1896 * 1897 * As a guide, all critical volatile reads and writes to the count field are marked in code 1898 * comments. 1899 */ 1900 1901 @Weak final LocalCache<K, V> map; 1902 1903 /** The number of live elements in this segment's region. */ 1904 volatile int count; 1905 1906 /** The weight of the live elements in this segment's region. */ 1907 @GuardedBy("this") 1908 long totalWeight; 1909 1910 /** 1911 * Number of updates that alter the size of the table. This is used during bulk-read methods to 1912 * make sure they see a consistent snapshot: If modCounts change during a traversal of segments 1913 * loading size or checking containsValue, then we might have an inconsistent view of state so 1914 * (usually) must retry. 1915 */ 1916 int modCount; 1917 1918 /** 1919 * The table is expanded when its size exceeds this threshold. (The value of this field is 1920 * always {@code (int) (capacity * 0.75)}.) 1921 */ 1922 int threshold; 1923 1924 /** The per-segment table. */ 1925 @CheckForNull volatile AtomicReferenceArray<ReferenceEntry<K, V>> table; 1926 1927 /** The maximum weight of this segment. UNSET_INT if there is no maximum. */ 1928 final long maxSegmentWeight; 1929 1930 /** 1931 * The key reference queue contains entries whose keys have been garbage collected, and which 1932 * need to be cleaned up internally. 1933 */ 1934 @CheckForNull final ReferenceQueue<K> keyReferenceQueue; 1935 1936 /** 1937 * The value reference queue contains value references whose values have been garbage collected, 1938 * and which need to be cleaned up internally. 
1939 */ 1940 @CheckForNull final ReferenceQueue<V> valueReferenceQueue; 1941 1942 /** 1943 * The recency queue is used to record which entries were accessed for updating the access 1944 * list's ordering. It is drained as a batch operation when either the DRAIN_THRESHOLD is 1945 * crossed or a write occurs on the segment. 1946 */ 1947 final Queue<ReferenceEntry<K, V>> recencyQueue; 1948 1949 /** 1950 * A counter of the number of reads since the last write, used to drain queues on a small 1951 * fraction of read operations. 1952 */ 1953 final AtomicInteger readCount = new AtomicInteger(); 1954 1955 /** 1956 * A queue of elements currently in the map, ordered by write time. Elements are added to the 1957 * tail of the queue on write. 1958 */ 1959 @GuardedBy("this") 1960 final Queue<ReferenceEntry<K, V>> writeQueue; 1961 1962 /** 1963 * A queue of elements currently in the map, ordered by access time. Elements are added to the 1964 * tail of the queue on access (note that writes count as accesses). 1965 */ 1966 @GuardedBy("this") 1967 final Queue<ReferenceEntry<K, V>> accessQueue; 1968 1969 /** Accumulates cache statistics. */ 1970 final StatsCounter statsCounter; 1971 Segment( LocalCache<K, V> map, int initialCapacity, long maxSegmentWeight, StatsCounter statsCounter)1972 Segment( 1973 LocalCache<K, V> map, 1974 int initialCapacity, 1975 long maxSegmentWeight, 1976 StatsCounter statsCounter) { 1977 this.map = map; 1978 this.maxSegmentWeight = maxSegmentWeight; 1979 this.statsCounter = checkNotNull(statsCounter); 1980 initTable(newEntryArray(initialCapacity)); 1981 1982 keyReferenceQueue = map.usesKeyReferences() ? new ReferenceQueue<>() : null; 1983 1984 valueReferenceQueue = map.usesValueReferences() ? new ReferenceQueue<>() : null; 1985 1986 recencyQueue = 1987 map.usesAccessQueue() ? new ConcurrentLinkedQueue<>() : LocalCache.discardingQueue(); 1988 1989 writeQueue = map.usesWriteQueue() ? new WriteQueue<>() : LocalCache.discardingQueue(); 1990 1991 accessQueue = map.usesAccessQueue() ? new AccessQueue<>() : LocalCache.discardingQueue(); 1992 } 1993 newEntryArray(int size)1994 AtomicReferenceArray<ReferenceEntry<K, V>> newEntryArray(int size) { 1995 return new AtomicReferenceArray<>(size); 1996 } 1997 initTable(AtomicReferenceArray<ReferenceEntry<K, V>> newTable)1998 void initTable(AtomicReferenceArray<ReferenceEntry<K, V>> newTable) { 1999 this.threshold = newTable.length() * 3 / 4; // 0.75 2000 if (!map.customWeigher() && this.threshold == maxSegmentWeight) { 2001 // prevent spurious expansion before eviction 2002 this.threshold++; 2003 } 2004 this.table = newTable; 2005 } 2006 2007 @GuardedBy("this") newEntry(K key, int hash, @CheckForNull ReferenceEntry<K, V> next)2008 ReferenceEntry<K, V> newEntry(K key, int hash, @CheckForNull ReferenceEntry<K, V> next) { 2009 return map.entryFactory.newEntry(this, checkNotNull(key), hash, next); 2010 } 2011 2012 /** 2013 * Copies {@code original} into a new entry chained to {@code newNext}. Returns the new entry, 2014 * or {@code null} if {@code original} was already garbage collected. 
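     *
     * <p>Copying is needed because entry {@code next} pointers are final: both {@code expand()}
     * and {@code removeEntryFromChain} rebuild the portion of a bucket chain that precedes a
     * moved or removed entry, calling this method once per node they copy.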
2015 */ 2016 @CheckForNull 2017 @GuardedBy("this") copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext)2018 ReferenceEntry<K, V> copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) { 2019 K key = original.getKey(); 2020 if (key == null) { 2021 // key collected 2022 return null; 2023 } 2024 2025 ValueReference<K, V> valueReference = original.getValueReference(); 2026 V value = valueReference.get(); 2027 if ((value == null) && valueReference.isActive()) { 2028 // value collected 2029 return null; 2030 } 2031 2032 ReferenceEntry<K, V> newEntry = map.entryFactory.copyEntry(this, original, newNext, key); 2033 newEntry.setValueReference(valueReference.copyFor(this.valueReferenceQueue, value, newEntry)); 2034 return newEntry; 2035 } 2036 2037 /** Sets a new value of an entry. Adds newly created entries at the end of the access queue. */ 2038 @GuardedBy("this") setValue(ReferenceEntry<K, V> entry, K key, V value, long now)2039 void setValue(ReferenceEntry<K, V> entry, K key, V value, long now) { 2040 ValueReference<K, V> previous = entry.getValueReference(); 2041 int weight = map.weigher.weigh(key, value); 2042 checkState(weight >= 0, "Weights must be non-negative"); 2043 2044 ValueReference<K, V> valueReference = 2045 map.valueStrength.referenceValue(this, entry, value, weight); 2046 entry.setValueReference(valueReference); 2047 recordWrite(entry, weight, now); 2048 previous.notifyNewValue(value); 2049 } 2050 2051 // loading 2052 2053 @CanIgnoreReturnValue get(K key, int hash, CacheLoader<? super K, V> loader)2054 V get(K key, int hash, CacheLoader<? super K, V> loader) throws ExecutionException { 2055 checkNotNull(key); 2056 checkNotNull(loader); 2057 try { 2058 if (count != 0) { // read-volatile 2059 // don't call getLiveEntry, which would ignore loading values 2060 ReferenceEntry<K, V> e = getEntry(key, hash); 2061 if (e != null) { 2062 long now = map.ticker.read(); 2063 V value = getLiveValue(e, now); 2064 if (value != null) { 2065 recordRead(e, now); 2066 statsCounter.recordHits(1); 2067 return scheduleRefresh(e, key, hash, value, now, loader); 2068 } 2069 ValueReference<K, V> valueReference = e.getValueReference(); 2070 if (valueReference.isLoading()) { 2071 return waitForLoadingValue(e, key, valueReference); 2072 } 2073 } 2074 } 2075 2076 // at this point e is either null or expired; 2077 return lockedGetOrLoad(key, hash, loader); 2078 } catch (ExecutionException ee) { 2079 Throwable cause = ee.getCause(); 2080 if (cause instanceof Error) { 2081 throw new ExecutionError((Error) cause); 2082 } else if (cause instanceof RuntimeException) { 2083 throw new UncheckedExecutionException(cause); 2084 } 2085 throw ee; 2086 } finally { 2087 postReadCleanup(); 2088 } 2089 } 2090 2091 @CheckForNull get(Object key, int hash)2092 V get(Object key, int hash) { 2093 try { 2094 if (count != 0) { // read-volatile 2095 long now = map.ticker.read(); 2096 ReferenceEntry<K, V> e = getLiveEntry(key, hash, now); 2097 if (e == null) { 2098 return null; 2099 } 2100 2101 V value = e.getValueReference().get(); 2102 if (value != null) { 2103 recordRead(e, now); 2104 return scheduleRefresh(e, e.getKey(), hash, value, now, map.defaultLoader); 2105 } 2106 tryDrainReferenceQueues(); 2107 } 2108 return null; 2109 } finally { 2110 postReadCleanup(); 2111 } 2112 } 2113 lockedGetOrLoad(K key, int hash, CacheLoader<? super K, V> loader)2114 V lockedGetOrLoad(K key, int hash, CacheLoader<? 
super K, V> loader) throws ExecutionException { 2115 ReferenceEntry<K, V> e; 2116 ValueReference<K, V> valueReference = null; 2117 LoadingValueReference<K, V> loadingValueReference = null; 2118 boolean createNewEntry = true; 2119 2120 lock(); 2121 try { 2122 // re-read ticker once inside the lock 2123 long now = map.ticker.read(); 2124 preWriteCleanup(now); 2125 2126 int newCount = this.count - 1; 2127 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2128 int index = hash & (table.length() - 1); 2129 ReferenceEntry<K, V> first = table.get(index); 2130 2131 for (e = first; e != null; e = e.getNext()) { 2132 K entryKey = e.getKey(); 2133 if (e.getHash() == hash 2134 && entryKey != null 2135 && map.keyEquivalence.equivalent(key, entryKey)) { 2136 valueReference = e.getValueReference(); 2137 if (valueReference.isLoading()) { 2138 createNewEntry = false; 2139 } else { 2140 V value = valueReference.get(); 2141 if (value == null) { 2142 enqueueNotification( 2143 entryKey, hash, value, valueReference.getWeight(), RemovalCause.COLLECTED); 2144 } else if (map.isExpired(e, now)) { 2145 // This is a duplicate check, as preWriteCleanup already purged expired 2146 // entries, but let's accommodate an incorrect expiration queue. 2147 enqueueNotification( 2148 entryKey, hash, value, valueReference.getWeight(), RemovalCause.EXPIRED); 2149 } else { 2150 recordLockedRead(e, now); 2151 statsCounter.recordHits(1); 2152 // we were concurrent with loading; don't consider refresh 2153 return value; 2154 } 2155 2156 // immediately reuse invalid entries 2157 writeQueue.remove(e); 2158 accessQueue.remove(e); 2159 this.count = newCount; // write-volatile 2160 } 2161 break; 2162 } 2163 } 2164 2165 if (createNewEntry) { 2166 loadingValueReference = new LoadingValueReference<>(); 2167 2168 if (e == null) { 2169 e = newEntry(key, hash, first); 2170 e.setValueReference(loadingValueReference); 2171 table.set(index, e); 2172 } else { 2173 e.setValueReference(loadingValueReference); 2174 } 2175 } 2176 } finally { 2177 unlock(); 2178 postWriteCleanup(); 2179 } 2180 2181 if (createNewEntry) { 2182 try { 2183 return loadSync(key, hash, loadingValueReference, loader); 2184 } finally { 2185 statsCounter.recordMisses(1); 2186 } 2187 } else { 2188 // The entry already exists. Wait for loading. 2189 return waitForLoadingValue(e, key, valueReference); 2190 } 2191 } 2192 waitForLoadingValue(ReferenceEntry<K, V> e, K key, ValueReference<K, V> valueReference)2193 V waitForLoadingValue(ReferenceEntry<K, V> e, K key, ValueReference<K, V> valueReference) 2194 throws ExecutionException { 2195 if (!valueReference.isLoading()) { 2196 throw new AssertionError(); 2197 } 2198 2199 // As of this writing, the only prod ValueReference implementation for which isLoading() is 2200 // true is LoadingValueReference. (Note, however, that not all LoadingValueReference instances 2201 // have isLoading()==true: LoadingValueReference has a subclass, ComputingValueReference, for 2202 // which isLoading() is false!) However, that might change, and we already have a *test* 2203 // implementation for which it doesn't hold. So we check instanceof to be safe. 2204 if (valueReference instanceof LoadingValueReference) { 2205 // We check whether the thread that is loading the entry is our current thread, which would 2206 // mean that we are both loading and waiting for the entry. In this case, we fail fast 2207 // instead of deadlocking. 
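      // A hypothetical example that trips the check below: a cache built with a CacheLoader whose
      // load(key) synchronously calls back into the same cache for the same key (for instance via
      // cache.getUnchecked(key) inside load(key)). That thread is then both the loading thread and
      // a waiter on its own LoadingValueReference, so we throw instead of blocking forever.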
2208 checkState( 2209 ((LoadingValueReference<K, V>) valueReference).getLoadingThread() 2210 != Thread.currentThread(), 2211 "Recursive load of: %s", 2212 key); 2213 } 2214 2215 // don't consider expiration as we're concurrent with loading 2216 try { 2217 V value = valueReference.waitForValue(); 2218 if (value == null) { 2219 throw new InvalidCacheLoadException("CacheLoader returned null for key " + key + "."); 2220 } 2221 // re-read ticker now that loading has completed 2222 long now = map.ticker.read(); 2223 recordRead(e, now); 2224 return value; 2225 } finally { 2226 statsCounter.recordMisses(1); 2227 } 2228 } 2229 2230 // at most one of loadSync/loadAsync may be called for any given LoadingValueReference 2231 loadSync( K key, int hash, LoadingValueReference<K, V> loadingValueReference, CacheLoader<? super K, V> loader)2232 V loadSync( 2233 K key, 2234 int hash, 2235 LoadingValueReference<K, V> loadingValueReference, 2236 CacheLoader<? super K, V> loader) 2237 throws ExecutionException { 2238 ListenableFuture<V> loadingFuture = loadingValueReference.loadFuture(key, loader); 2239 return getAndRecordStats(key, hash, loadingValueReference, loadingFuture); 2240 } 2241 loadAsync( final K key, final int hash, final LoadingValueReference<K, V> loadingValueReference, CacheLoader<? super K, V> loader)2242 ListenableFuture<V> loadAsync( 2243 final K key, 2244 final int hash, 2245 final LoadingValueReference<K, V> loadingValueReference, 2246 CacheLoader<? super K, V> loader) { 2247 final ListenableFuture<V> loadingFuture = loadingValueReference.loadFuture(key, loader); 2248 loadingFuture.addListener( 2249 () -> { 2250 try { 2251 getAndRecordStats(key, hash, loadingValueReference, loadingFuture); 2252 } catch (Throwable t) { 2253 logger.log(Level.WARNING, "Exception thrown during refresh", t); 2254 loadingValueReference.setException(t); 2255 } 2256 }, 2257 directExecutor()); 2258 return loadingFuture; 2259 } 2260 2261 /** Waits uninterruptibly for {@code newValue} to be loaded, and then records loading stats. */ 2262 @CanIgnoreReturnValue getAndRecordStats( K key, int hash, LoadingValueReference<K, V> loadingValueReference, ListenableFuture<V> newValue)2263 V getAndRecordStats( 2264 K key, 2265 int hash, 2266 LoadingValueReference<K, V> loadingValueReference, 2267 ListenableFuture<V> newValue) 2268 throws ExecutionException { 2269 V value = null; 2270 try { 2271 value = getUninterruptibly(newValue); 2272 if (value == null) { 2273 throw new InvalidCacheLoadException("CacheLoader returned null for key " + key + "."); 2274 } 2275 statsCounter.recordLoadSuccess(loadingValueReference.elapsedNanos()); 2276 storeLoadedValue(key, hash, loadingValueReference, value); 2277 return value; 2278 } finally { 2279 if (value == null) { 2280 statsCounter.recordLoadException(loadingValueReference.elapsedNanos()); 2281 removeLoadingValue(key, hash, loadingValueReference); 2282 } 2283 } 2284 } 2285 scheduleRefresh( ReferenceEntry<K, V> entry, K key, int hash, V oldValue, long now, CacheLoader<? super K, V> loader)2286 V scheduleRefresh( 2287 ReferenceEntry<K, V> entry, 2288 K key, 2289 int hash, 2290 V oldValue, 2291 long now, 2292 CacheLoader<? 
super K, V> loader) { 2293 if (map.refreshes() 2294 && (now - entry.getWriteTime() > map.refreshNanos) 2295 && !entry.getValueReference().isLoading()) { 2296 V newValue = refresh(key, hash, loader, true); 2297 if (newValue != null) { 2298 return newValue; 2299 } 2300 } 2301 return oldValue; 2302 } 2303 2304 /** 2305 * Refreshes the value associated with {@code key}, unless another thread is already doing so. 2306 * Returns the newly refreshed value associated with {@code key} if it was refreshed inline, or 2307 * {@code null} if another thread is performing the refresh or if an error occurs during 2308 * refresh. 2309 */ 2310 @CanIgnoreReturnValue 2311 @CheckForNull refresh(K key, int hash, CacheLoader<? super K, V> loader, boolean checkTime)2312 V refresh(K key, int hash, CacheLoader<? super K, V> loader, boolean checkTime) { 2313 final LoadingValueReference<K, V> loadingValueReference = 2314 insertLoadingValueReference(key, hash, checkTime); 2315 if (loadingValueReference == null) { 2316 return null; 2317 } 2318 2319 ListenableFuture<V> result = loadAsync(key, hash, loadingValueReference, loader); 2320 if (result.isDone()) { 2321 try { 2322 return Uninterruptibles.getUninterruptibly(result); 2323 } catch (Throwable t) { 2324 // don't let refresh exceptions propagate; error was already logged 2325 } 2326 } 2327 return null; 2328 } 2329 2330 /** 2331 * Returns a newly inserted {@code LoadingValueReference}, or null if the live value reference 2332 * is already loading. 2333 */ 2334 @CheckForNull insertLoadingValueReference( final K key, final int hash, boolean checkTime)2335 LoadingValueReference<K, V> insertLoadingValueReference( 2336 final K key, final int hash, boolean checkTime) { 2337 ReferenceEntry<K, V> e = null; 2338 lock(); 2339 try { 2340 long now = map.ticker.read(); 2341 preWriteCleanup(now); 2342 2343 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2344 int index = hash & (table.length() - 1); 2345 ReferenceEntry<K, V> first = table.get(index); 2346 2347 // Look for an existing entry. 2348 for (e = first; e != null; e = e.getNext()) { 2349 K entryKey = e.getKey(); 2350 if (e.getHash() == hash 2351 && entryKey != null 2352 && map.keyEquivalence.equivalent(key, entryKey)) { 2353 // We found an existing entry. 2354 2355 ValueReference<K, V> valueReference = e.getValueReference(); 2356 if (valueReference.isLoading() 2357 || (checkTime && (now - e.getWriteTime() < map.refreshNanos))) { 2358 // refresh is a no-op if loading is pending 2359 // if checkTime, we want to check *after* acquiring the lock if refresh still needs 2360 // to be scheduled 2361 return null; 2362 } 2363 2364 // continue returning old value while loading 2365 ++modCount; 2366 LoadingValueReference<K, V> loadingValueReference = 2367 new LoadingValueReference<>(valueReference); 2368 e.setValueReference(loadingValueReference); 2369 return loadingValueReference; 2370 } 2371 } 2372 2373 ++modCount; 2374 LoadingValueReference<K, V> loadingValueReference = new LoadingValueReference<>(); 2375 e = newEntry(key, hash, first); 2376 e.setValueReference(loadingValueReference); 2377 table.set(index, e); 2378 return loadingValueReference; 2379 } finally { 2380 unlock(); 2381 postWriteCleanup(); 2382 } 2383 } 2384 2385 // reference queues, for garbage collection cleanup 2386 2387 /** Cleanup collected entries when the lock is available. 
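     *
     * <p>Best-effort: this uses {@code tryLock()}, so read paths never block on cleanup; if the
     * lock is busy, the queues are simply drained by a later write or locked cleanup pass.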
*/ tryDrainReferenceQueues()2388 void tryDrainReferenceQueues() { 2389 if (tryLock()) { 2390 try { 2391 drainReferenceQueues(); 2392 } finally { 2393 unlock(); 2394 } 2395 } 2396 } 2397 2398 /** 2399 * Drain the key and value reference queues, cleaning up internal entries containing garbage 2400 * collected keys or values. 2401 */ 2402 @GuardedBy("this") drainReferenceQueues()2403 void drainReferenceQueues() { 2404 if (map.usesKeyReferences()) { 2405 drainKeyReferenceQueue(); 2406 } 2407 if (map.usesValueReferences()) { 2408 drainValueReferenceQueue(); 2409 } 2410 } 2411 2412 @GuardedBy("this") drainKeyReferenceQueue()2413 void drainKeyReferenceQueue() { 2414 Reference<? extends K> ref; 2415 int i = 0; 2416 while ((ref = keyReferenceQueue.poll()) != null) { 2417 @SuppressWarnings("unchecked") 2418 ReferenceEntry<K, V> entry = (ReferenceEntry<K, V>) ref; 2419 map.reclaimKey(entry); 2420 if (++i == DRAIN_MAX) { 2421 break; 2422 } 2423 } 2424 } 2425 2426 @GuardedBy("this") drainValueReferenceQueue()2427 void drainValueReferenceQueue() { 2428 Reference<? extends V> ref; 2429 int i = 0; 2430 while ((ref = valueReferenceQueue.poll()) != null) { 2431 @SuppressWarnings("unchecked") 2432 ValueReference<K, V> valueReference = (ValueReference<K, V>) ref; 2433 map.reclaimValue(valueReference); 2434 if (++i == DRAIN_MAX) { 2435 break; 2436 } 2437 } 2438 } 2439 2440 /** Clears all entries from the key and value reference queues. */ clearReferenceQueues()2441 void clearReferenceQueues() { 2442 if (map.usesKeyReferences()) { 2443 clearKeyReferenceQueue(); 2444 } 2445 if (map.usesValueReferences()) { 2446 clearValueReferenceQueue(); 2447 } 2448 } 2449 clearKeyReferenceQueue()2450 void clearKeyReferenceQueue() { 2451 while (keyReferenceQueue.poll() != null) {} 2452 } 2453 clearValueReferenceQueue()2454 void clearValueReferenceQueue() { 2455 while (valueReferenceQueue.poll() != null) {} 2456 } 2457 2458 // recency queue, shared by expiration and eviction 2459 2460 /** 2461 * Records the relative order in which this read was performed by adding {@code entry} to the 2462 * recency queue. At write-time, or when the queue is full past the threshold, the queue will be 2463 * drained and the entries therein processed. 2464 * 2465 * <p>Note: locked reads should use {@link #recordLockedRead}. 2466 */ recordRead(ReferenceEntry<K, V> entry, long now)2467 void recordRead(ReferenceEntry<K, V> entry, long now) { 2468 if (map.recordsAccess()) { 2469 entry.setAccessTime(now); 2470 } 2471 recencyQueue.add(entry); 2472 } 2473 2474 /** 2475 * Updates the eviction metadata that {@code entry} was just read. This currently amounts to 2476 * adding {@code entry} to relevant eviction lists. 2477 * 2478 * <p>Note: this method should only be called under lock, as it directly manipulates the 2479 * eviction queues. Unlocked reads should use {@link #recordRead}. 2480 */ 2481 @GuardedBy("this") recordLockedRead(ReferenceEntry<K, V> entry, long now)2482 void recordLockedRead(ReferenceEntry<K, V> entry, long now) { 2483 if (map.recordsAccess()) { 2484 entry.setAccessTime(now); 2485 } 2486 accessQueue.add(entry); 2487 } 2488 2489 /** 2490 * Updates eviction metadata that {@code entry} was just written. This currently amounts to 2491 * adding {@code entry} to relevant eviction lists. 
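     *
     * <p>Concretely (see the body below): the recency queue is drained first (we already hold the
     * lock), the entry's weight is added to {@code totalWeight}, its access and write timestamps
     * are refreshed when the cache records them, and the entry is (re)queued at the tail of both
     * the access queue and the write queue.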
     */
    @GuardedBy("this")
    void recordWrite(ReferenceEntry<K, V> entry, int weight, long now) {
      // we are already under lock, so drain the recency queue immediately
      drainRecencyQueue();
      totalWeight += weight;

      if (map.recordsAccess()) {
        entry.setAccessTime(now);
      }
      if (map.recordsWrite()) {
        entry.setWriteTime(now);
      }
      accessQueue.add(entry);
      writeQueue.add(entry);
    }

    /**
     * Drains the recency queue, updating eviction metadata that the entries therein were read in
     * the specified relative order. This currently amounts to adding them to relevant eviction
     * lists (accounting for the fact that they could have been removed from the map since being
     * added to the recency queue).
     */
    @GuardedBy("this")
    void drainRecencyQueue() {
      ReferenceEntry<K, V> e;
      while ((e = recencyQueue.poll()) != null) {
        // An entry may be in the recency queue despite it being removed from
        // the map. This can occur when the entry was concurrently read while a
        // writer is removing it from the segment or after a clear has removed
        // all the segment's entries.
        if (accessQueue.contains(e)) {
          accessQueue.add(e);
        }
      }
    }

    // expiration

    /** Cleanup expired entries when the lock is available. */
    void tryExpireEntries(long now) {
      if (tryLock()) {
        try {
          expireEntries(now);
        } finally {
          unlock();
          // don't call postWriteCleanup as we're in a read
        }
      }
    }

    @GuardedBy("this")
    void expireEntries(long now) {
      drainRecencyQueue();

      ReferenceEntry<K, V> e;
      while ((e = writeQueue.peek()) != null && map.isExpired(e, now)) {
        if (!removeEntry(e, e.getHash(), RemovalCause.EXPIRED)) {
          throw new AssertionError();
        }
      }
      while ((e = accessQueue.peek()) != null && map.isExpired(e, now)) {
        if (!removeEntry(e, e.getHash(), RemovalCause.EXPIRED)) {
          throw new AssertionError();
        }
      }
    }

    // eviction

    @GuardedBy("this")
    void enqueueNotification(
        @CheckForNull K key, int hash, @CheckForNull V value, int weight, RemovalCause cause) {
      totalWeight -= weight;
      if (cause.wasEvicted()) {
        statsCounter.recordEviction();
      }
      if (map.removalNotificationQueue != DISCARDING_QUEUE) {
        RemovalNotification<K, V> notification = RemovalNotification.create(key, value, cause);
        map.removalNotificationQueue.offer(notification);
      }
    }

    /**
     * Performs eviction if the segment is over capacity. Avoids flushing the entire cache if the
     * newest entry exceeds the maximum weight all on its own.
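     *
     * <p>For example (illustrative numbers): with {@code maxSegmentWeight == 100}, inserting a
     * single entry of weight 150 removes just that entry with {@code RemovalCause.SIZE};
     * otherwise entries are evicted from the head of the access queue (the least recently used
     * end) until {@code totalWeight <= maxSegmentWeight}.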
2578 * 2579 * @param newest the most recently added entry 2580 */ 2581 @GuardedBy("this") evictEntries(ReferenceEntry<K, V> newest)2582 void evictEntries(ReferenceEntry<K, V> newest) { 2583 if (!map.evictsBySize()) { 2584 return; 2585 } 2586 2587 drainRecencyQueue(); 2588 2589 // If the newest entry by itself is too heavy for the segment, don't bother evicting 2590 // anything else, just that 2591 if (newest.getValueReference().getWeight() > maxSegmentWeight) { 2592 if (!removeEntry(newest, newest.getHash(), RemovalCause.SIZE)) { 2593 throw new AssertionError(); 2594 } 2595 } 2596 2597 while (totalWeight > maxSegmentWeight) { 2598 ReferenceEntry<K, V> e = getNextEvictable(); 2599 if (!removeEntry(e, e.getHash(), RemovalCause.SIZE)) { 2600 throw new AssertionError(); 2601 } 2602 } 2603 } 2604 2605 // TODO(fry): instead implement this with an eviction head 2606 @GuardedBy("this") getNextEvictable()2607 ReferenceEntry<K, V> getNextEvictable() { 2608 for (ReferenceEntry<K, V> e : accessQueue) { 2609 int weight = e.getValueReference().getWeight(); 2610 if (weight > 0) { 2611 return e; 2612 } 2613 } 2614 throw new AssertionError(); 2615 } 2616 2617 /** Returns first entry of bin for given hash. */ getFirst(int hash)2618 ReferenceEntry<K, V> getFirst(int hash) { 2619 // read this volatile field only once 2620 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2621 return table.get(hash & (table.length() - 1)); 2622 } 2623 2624 // Specialized implementations of map methods 2625 2626 @CheckForNull getEntry(Object key, int hash)2627 ReferenceEntry<K, V> getEntry(Object key, int hash) { 2628 for (ReferenceEntry<K, V> e = getFirst(hash); e != null; e = e.getNext()) { 2629 if (e.getHash() != hash) { 2630 continue; 2631 } 2632 2633 K entryKey = e.getKey(); 2634 if (entryKey == null) { 2635 tryDrainReferenceQueues(); 2636 continue; 2637 } 2638 2639 if (map.keyEquivalence.equivalent(key, entryKey)) { 2640 return e; 2641 } 2642 } 2643 2644 return null; 2645 } 2646 2647 @CheckForNull getLiveEntry(Object key, int hash, long now)2648 ReferenceEntry<K, V> getLiveEntry(Object key, int hash, long now) { 2649 ReferenceEntry<K, V> e = getEntry(key, hash); 2650 if (e == null) { 2651 return null; 2652 } else if (map.isExpired(e, now)) { 2653 tryExpireEntries(now); 2654 return null; 2655 } 2656 return e; 2657 } 2658 2659 /** 2660 * Gets the value from an entry. Returns null if the entry is invalid, partially-collected, 2661 * loading, or expired. 2662 */ getLiveValue(ReferenceEntry<K, V> entry, long now)2663 V getLiveValue(ReferenceEntry<K, V> entry, long now) { 2664 if (entry.getKey() == null) { 2665 tryDrainReferenceQueues(); 2666 return null; 2667 } 2668 V value = entry.getValueReference().get(); 2669 if (value == null) { 2670 tryDrainReferenceQueues(); 2671 return null; 2672 } 2673 2674 if (map.isExpired(entry, now)) { 2675 tryExpireEntries(now); 2676 return null; 2677 } 2678 return value; 2679 } 2680 containsKey(Object key, int hash)2681 boolean containsKey(Object key, int hash) { 2682 try { 2683 if (count != 0) { // read-volatile 2684 long now = map.ticker.read(); 2685 ReferenceEntry<K, V> e = getLiveEntry(key, hash, now); 2686 if (e == null) { 2687 return false; 2688 } 2689 return e.getValueReference().get() != null; 2690 } 2691 2692 return false; 2693 } finally { 2694 postReadCleanup(); 2695 } 2696 } 2697 2698 /** 2699 * This method is a convenience for testing. Code should call {@link LocalCache#containsValue} 2700 * directly. 
2701 */ 2702 @VisibleForTesting containsValue(Object value)2703 boolean containsValue(Object value) { 2704 try { 2705 if (count != 0) { // read-volatile 2706 long now = map.ticker.read(); 2707 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2708 int length = table.length(); 2709 for (int i = 0; i < length; ++i) { 2710 for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) { 2711 V entryValue = getLiveValue(e, now); 2712 if (entryValue == null) { 2713 continue; 2714 } 2715 if (map.valueEquivalence.equivalent(value, entryValue)) { 2716 return true; 2717 } 2718 } 2719 } 2720 } 2721 2722 return false; 2723 } finally { 2724 postReadCleanup(); 2725 } 2726 } 2727 2728 @CanIgnoreReturnValue 2729 @CheckForNull put(K key, int hash, V value, boolean onlyIfAbsent)2730 V put(K key, int hash, V value, boolean onlyIfAbsent) { 2731 lock(); 2732 try { 2733 long now = map.ticker.read(); 2734 preWriteCleanup(now); 2735 2736 int newCount = this.count + 1; 2737 if (newCount > this.threshold) { // ensure capacity 2738 expand(); 2739 newCount = this.count + 1; 2740 } 2741 2742 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2743 int index = hash & (table.length() - 1); 2744 ReferenceEntry<K, V> first = table.get(index); 2745 2746 // Look for an existing entry. 2747 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2748 K entryKey = e.getKey(); 2749 if (e.getHash() == hash 2750 && entryKey != null 2751 && map.keyEquivalence.equivalent(key, entryKey)) { 2752 // We found an existing entry. 2753 2754 ValueReference<K, V> valueReference = e.getValueReference(); 2755 V entryValue = valueReference.get(); 2756 2757 if (entryValue == null) { 2758 ++modCount; 2759 if (valueReference.isActive()) { 2760 enqueueNotification( 2761 key, hash, entryValue, valueReference.getWeight(), RemovalCause.COLLECTED); 2762 setValue(e, key, value, now); 2763 newCount = this.count; // count remains unchanged 2764 } else { 2765 setValue(e, key, value, now); 2766 newCount = this.count + 1; 2767 } 2768 this.count = newCount; // write-volatile 2769 evictEntries(e); 2770 return null; 2771 } else if (onlyIfAbsent) { 2772 // Mimic 2773 // "if (!map.containsKey(key)) ... 2774 // else return map.get(key); 2775 recordLockedRead(e, now); 2776 return entryValue; 2777 } else { 2778 // clobber existing entry, count remains unchanged 2779 ++modCount; 2780 enqueueNotification( 2781 key, hash, entryValue, valueReference.getWeight(), RemovalCause.REPLACED); 2782 setValue(e, key, value, now); 2783 evictEntries(e); 2784 return entryValue; 2785 } 2786 } 2787 } 2788 2789 // Create a new entry. 2790 ++modCount; 2791 ReferenceEntry<K, V> newEntry = newEntry(key, hash, first); 2792 setValue(newEntry, key, value, now); 2793 table.set(index, newEntry); 2794 newCount = this.count + 1; 2795 this.count = newCount; // write-volatile 2796 evictEntries(newEntry); 2797 return null; 2798 } finally { 2799 unlock(); 2800 postWriteCleanup(); 2801 } 2802 } 2803 2804 /** Expands the table if possible. */ 2805 @GuardedBy("this") expand()2806 void expand() { 2807 AtomicReferenceArray<ReferenceEntry<K, V>> oldTable = table; 2808 int oldCapacity = oldTable.length(); 2809 if (oldCapacity >= MAXIMUM_CAPACITY) { 2810 return; 2811 } 2812 2813 /* 2814 * Reclassify nodes in each list to new Map. Because we are using power-of-two expansion, the 2815 * elements from each bin must either stay at same index, or move with a power of two offset. 
2816 * We eliminate unnecessary node creation by catching cases where old nodes can be reused 2817 * because their next fields won't change. Statistically, at the default threshold, only about 2818 * one-sixth of them need cloning when a table doubles. The nodes they replace will be garbage 2819 * collectable as soon as they are no longer referenced by any reader thread that may be in 2820 * the midst of traversing table right now. 2821 */ 2822 2823 int newCount = count; 2824 AtomicReferenceArray<ReferenceEntry<K, V>> newTable = newEntryArray(oldCapacity << 1); 2825 threshold = newTable.length() * 3 / 4; 2826 int newMask = newTable.length() - 1; 2827 for (int oldIndex = 0; oldIndex < oldCapacity; ++oldIndex) { 2828 // We need to guarantee that any existing reads of old Map can 2829 // proceed. So we cannot yet null out each bin. 2830 ReferenceEntry<K, V> head = oldTable.get(oldIndex); 2831 2832 if (head != null) { 2833 ReferenceEntry<K, V> next = head.getNext(); 2834 int headIndex = head.getHash() & newMask; 2835 2836 // Single node on list 2837 if (next == null) { 2838 newTable.set(headIndex, head); 2839 } else { 2840 // Reuse the consecutive sequence of nodes with the same target 2841 // index from the end of the list. tail points to the first 2842 // entry in the reusable list. 2843 ReferenceEntry<K, V> tail = head; 2844 int tailIndex = headIndex; 2845 for (ReferenceEntry<K, V> e = next; e != null; e = e.getNext()) { 2846 int newIndex = e.getHash() & newMask; 2847 if (newIndex != tailIndex) { 2848 // The index changed. We'll need to copy the previous entry. 2849 tailIndex = newIndex; 2850 tail = e; 2851 } 2852 } 2853 newTable.set(tailIndex, tail); 2854 2855 // Clone nodes leading up to the tail. 2856 for (ReferenceEntry<K, V> e = head; e != tail; e = e.getNext()) { 2857 int newIndex = e.getHash() & newMask; 2858 ReferenceEntry<K, V> newNext = newTable.get(newIndex); 2859 ReferenceEntry<K, V> newFirst = copyEntry(e, newNext); 2860 if (newFirst != null) { 2861 newTable.set(newIndex, newFirst); 2862 } else { 2863 removeCollectedEntry(e); 2864 newCount--; 2865 } 2866 } 2867 } 2868 } 2869 } 2870 table = newTable; 2871 this.count = newCount; 2872 } 2873 replace(K key, int hash, V oldValue, V newValue)2874 boolean replace(K key, int hash, V oldValue, V newValue) { 2875 lock(); 2876 try { 2877 long now = map.ticker.read(); 2878 preWriteCleanup(now); 2879 2880 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2881 int index = hash & (table.length() - 1); 2882 ReferenceEntry<K, V> first = table.get(index); 2883 2884 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2885 K entryKey = e.getKey(); 2886 if (e.getHash() == hash 2887 && entryKey != null 2888 && map.keyEquivalence.equivalent(key, entryKey)) { 2889 ValueReference<K, V> valueReference = e.getValueReference(); 2890 V entryValue = valueReference.get(); 2891 if (entryValue == null) { 2892 if (valueReference.isActive()) { 2893 // If the value disappeared, this entry is partially collected. 
2894 int newCount = this.count - 1; 2895 ++modCount; 2896 ReferenceEntry<K, V> newFirst = 2897 removeValueFromChain( 2898 first, 2899 e, 2900 entryKey, 2901 hash, 2902 entryValue, 2903 valueReference, 2904 RemovalCause.COLLECTED); 2905 newCount = this.count - 1; 2906 table.set(index, newFirst); 2907 this.count = newCount; // write-volatile 2908 } 2909 return false; 2910 } 2911 2912 if (map.valueEquivalence.equivalent(oldValue, entryValue)) { 2913 ++modCount; 2914 enqueueNotification( 2915 key, hash, entryValue, valueReference.getWeight(), RemovalCause.REPLACED); 2916 setValue(e, key, newValue, now); 2917 evictEntries(e); 2918 return true; 2919 } else { 2920 // Mimic 2921 // "if (map.containsKey(key) && map.get(key).equals(oldValue))..." 2922 recordLockedRead(e, now); 2923 return false; 2924 } 2925 } 2926 } 2927 2928 return false; 2929 } finally { 2930 unlock(); 2931 postWriteCleanup(); 2932 } 2933 } 2934 2935 @CheckForNull replace(K key, int hash, V newValue)2936 V replace(K key, int hash, V newValue) { 2937 lock(); 2938 try { 2939 long now = map.ticker.read(); 2940 preWriteCleanup(now); 2941 2942 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2943 int index = hash & (table.length() - 1); 2944 ReferenceEntry<K, V> first = table.get(index); 2945 2946 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 2947 K entryKey = e.getKey(); 2948 if (e.getHash() == hash 2949 && entryKey != null 2950 && map.keyEquivalence.equivalent(key, entryKey)) { 2951 ValueReference<K, V> valueReference = e.getValueReference(); 2952 V entryValue = valueReference.get(); 2953 if (entryValue == null) { 2954 if (valueReference.isActive()) { 2955 // If the value disappeared, this entry is partially collected. 2956 int newCount = this.count - 1; 2957 ++modCount; 2958 ReferenceEntry<K, V> newFirst = 2959 removeValueFromChain( 2960 first, 2961 e, 2962 entryKey, 2963 hash, 2964 entryValue, 2965 valueReference, 2966 RemovalCause.COLLECTED); 2967 newCount = this.count - 1; 2968 table.set(index, newFirst); 2969 this.count = newCount; // write-volatile 2970 } 2971 return null; 2972 } 2973 2974 ++modCount; 2975 enqueueNotification( 2976 key, hash, entryValue, valueReference.getWeight(), RemovalCause.REPLACED); 2977 setValue(e, key, newValue, now); 2978 evictEntries(e); 2979 return entryValue; 2980 } 2981 } 2982 2983 return null; 2984 } finally { 2985 unlock(); 2986 postWriteCleanup(); 2987 } 2988 } 2989 2990 @CheckForNull remove(Object key, int hash)2991 V remove(Object key, int hash) { 2992 lock(); 2993 try { 2994 long now = map.ticker.read(); 2995 preWriteCleanup(now); 2996 2997 int newCount = this.count - 1; 2998 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 2999 int index = hash & (table.length() - 1); 3000 ReferenceEntry<K, V> first = table.get(index); 3001 3002 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 3003 K entryKey = e.getKey(); 3004 if (e.getHash() == hash 3005 && entryKey != null 3006 && map.keyEquivalence.equivalent(key, entryKey)) { 3007 ValueReference<K, V> valueReference = e.getValueReference(); 3008 V entryValue = valueReference.get(); 3009 3010 RemovalCause cause; 3011 if (entryValue != null) { 3012 cause = RemovalCause.EXPLICIT; 3013 } else if (valueReference.isActive()) { 3014 cause = RemovalCause.COLLECTED; 3015 } else { 3016 // currently loading 3017 return null; 3018 } 3019 3020 ++modCount; 3021 ReferenceEntry<K, V> newFirst = 3022 removeValueFromChain(first, e, entryKey, hash, entryValue, valueReference, cause); 3023 newCount = 
this.count - 1; 3024 table.set(index, newFirst); 3025 this.count = newCount; // write-volatile 3026 return entryValue; 3027 } 3028 } 3029 3030 return null; 3031 } finally { 3032 unlock(); 3033 postWriteCleanup(); 3034 } 3035 } 3036 remove(Object key, int hash, Object value)3037 boolean remove(Object key, int hash, Object value) { 3038 lock(); 3039 try { 3040 long now = map.ticker.read(); 3041 preWriteCleanup(now); 3042 3043 int newCount = this.count - 1; 3044 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 3045 int index = hash & (table.length() - 1); 3046 ReferenceEntry<K, V> first = table.get(index); 3047 3048 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 3049 K entryKey = e.getKey(); 3050 if (e.getHash() == hash 3051 && entryKey != null 3052 && map.keyEquivalence.equivalent(key, entryKey)) { 3053 ValueReference<K, V> valueReference = e.getValueReference(); 3054 V entryValue = valueReference.get(); 3055 3056 RemovalCause cause; 3057 if (map.valueEquivalence.equivalent(value, entryValue)) { 3058 cause = RemovalCause.EXPLICIT; 3059 } else if (entryValue == null && valueReference.isActive()) { 3060 cause = RemovalCause.COLLECTED; 3061 } else { 3062 // currently loading 3063 return false; 3064 } 3065 3066 ++modCount; 3067 ReferenceEntry<K, V> newFirst = 3068 removeValueFromChain(first, e, entryKey, hash, entryValue, valueReference, cause); 3069 newCount = this.count - 1; 3070 table.set(index, newFirst); 3071 this.count = newCount; // write-volatile 3072 return (cause == RemovalCause.EXPLICIT); 3073 } 3074 } 3075 3076 return false; 3077 } finally { 3078 unlock(); 3079 postWriteCleanup(); 3080 } 3081 } 3082 3083 @CanIgnoreReturnValue storeLoadedValue( K key, int hash, LoadingValueReference<K, V> oldValueReference, V newValue)3084 boolean storeLoadedValue( 3085 K key, int hash, LoadingValueReference<K, V> oldValueReference, V newValue) { 3086 lock(); 3087 try { 3088 long now = map.ticker.read(); 3089 preWriteCleanup(now); 3090 3091 int newCount = this.count + 1; 3092 if (newCount > this.threshold) { // ensure capacity 3093 expand(); 3094 newCount = this.count + 1; 3095 } 3096 3097 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 3098 int index = hash & (table.length() - 1); 3099 ReferenceEntry<K, V> first = table.get(index); 3100 3101 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 3102 K entryKey = e.getKey(); 3103 if (e.getHash() == hash 3104 && entryKey != null 3105 && map.keyEquivalence.equivalent(key, entryKey)) { 3106 ValueReference<K, V> valueReference = e.getValueReference(); 3107 V entryValue = valueReference.get(); 3108 // replace the old LoadingValueReference if it's live, otherwise 3109 // perform a putIfAbsent 3110 if (oldValueReference == valueReference 3111 || (entryValue == null && valueReference != UNSET)) { 3112 ++modCount; 3113 if (oldValueReference.isActive()) { 3114 RemovalCause cause = 3115 (entryValue == null) ? 
RemovalCause.COLLECTED : RemovalCause.REPLACED; 3116 enqueueNotification(key, hash, entryValue, oldValueReference.getWeight(), cause); 3117 newCount--; 3118 } 3119 setValue(e, key, newValue, now); 3120 this.count = newCount; // write-volatile 3121 evictEntries(e); 3122 return true; 3123 } 3124 3125 // the loaded value was already clobbered 3126 enqueueNotification(key, hash, newValue, 0, RemovalCause.REPLACED); 3127 return false; 3128 } 3129 } 3130 3131 ++modCount; 3132 ReferenceEntry<K, V> newEntry = newEntry(key, hash, first); 3133 setValue(newEntry, key, newValue, now); 3134 table.set(index, newEntry); 3135 this.count = newCount; // write-volatile 3136 evictEntries(newEntry); 3137 return true; 3138 } finally { 3139 unlock(); 3140 postWriteCleanup(); 3141 } 3142 } 3143 clear()3144 void clear() { 3145 if (count != 0) { // read-volatile 3146 lock(); 3147 try { 3148 long now = map.ticker.read(); 3149 preWriteCleanup(now); 3150 3151 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 3152 for (int i = 0; i < table.length(); ++i) { 3153 for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) { 3154 // Loading references aren't actually in the map yet. 3155 if (e.getValueReference().isActive()) { 3156 K key = e.getKey(); 3157 V value = e.getValueReference().get(); 3158 RemovalCause cause = 3159 (key == null || value == null) ? RemovalCause.COLLECTED : RemovalCause.EXPLICIT; 3160 enqueueNotification( 3161 key, e.getHash(), value, e.getValueReference().getWeight(), cause); 3162 } 3163 } 3164 } 3165 for (int i = 0; i < table.length(); ++i) { 3166 table.set(i, null); 3167 } 3168 clearReferenceQueues(); 3169 writeQueue.clear(); 3170 accessQueue.clear(); 3171 readCount.set(0); 3172 3173 ++modCount; 3174 count = 0; // write-volatile 3175 } finally { 3176 unlock(); 3177 postWriteCleanup(); 3178 } 3179 } 3180 } 3181 3182 @GuardedBy("this") 3183 @CheckForNull removeValueFromChain( ReferenceEntry<K, V> first, ReferenceEntry<K, V> entry, @CheckForNull K key, int hash, V value, ValueReference<K, V> valueReference, RemovalCause cause)3184 ReferenceEntry<K, V> removeValueFromChain( 3185 ReferenceEntry<K, V> first, 3186 ReferenceEntry<K, V> entry, 3187 @CheckForNull K key, 3188 int hash, 3189 V value, 3190 ValueReference<K, V> valueReference, 3191 RemovalCause cause) { 3192 enqueueNotification(key, hash, value, valueReference.getWeight(), cause); 3193 writeQueue.remove(entry); 3194 accessQueue.remove(entry); 3195 3196 if (valueReference.isLoading()) { 3197 valueReference.notifyNewValue(null); 3198 return first; 3199 } else { 3200 return removeEntryFromChain(first, entry); 3201 } 3202 } 3203 3204 @GuardedBy("this") 3205 @CheckForNull removeEntryFromChain( ReferenceEntry<K, V> first, ReferenceEntry<K, V> entry)3206 ReferenceEntry<K, V> removeEntryFromChain( 3207 ReferenceEntry<K, V> first, ReferenceEntry<K, V> entry) { 3208 int newCount = count; 3209 ReferenceEntry<K, V> newFirst = entry.getNext(); 3210 for (ReferenceEntry<K, V> e = first; e != entry; e = e.getNext()) { 3211 ReferenceEntry<K, V> next = copyEntry(e, newFirst); 3212 if (next != null) { 3213 newFirst = next; 3214 } else { 3215 removeCollectedEntry(e); 3216 newCount--; 3217 } 3218 } 3219 this.count = newCount; 3220 return newFirst; 3221 } 3222 3223 @GuardedBy("this") removeCollectedEntry(ReferenceEntry<K, V> entry)3224 void removeCollectedEntry(ReferenceEntry<K, V> entry) { 3225 enqueueNotification( 3226 entry.getKey(), 3227 entry.getHash(), 3228 entry.getValueReference().get(), 3229 entry.getValueReference().getWeight(), 
3230 RemovalCause.COLLECTED); 3231 writeQueue.remove(entry); 3232 accessQueue.remove(entry); 3233 } 3234 3235 /** Removes an entry whose key has been garbage collected. */ 3236 @CanIgnoreReturnValue reclaimKey(ReferenceEntry<K, V> entry, int hash)3237 boolean reclaimKey(ReferenceEntry<K, V> entry, int hash) { 3238 lock(); 3239 try { 3240 int newCount = count - 1; 3241 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 3242 int index = hash & (table.length() - 1); 3243 ReferenceEntry<K, V> first = table.get(index); 3244 3245 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 3246 if (e == entry) { 3247 ++modCount; 3248 ReferenceEntry<K, V> newFirst = 3249 removeValueFromChain( 3250 first, 3251 e, 3252 e.getKey(), 3253 hash, 3254 e.getValueReference().get(), 3255 e.getValueReference(), 3256 RemovalCause.COLLECTED); 3257 newCount = this.count - 1; 3258 table.set(index, newFirst); 3259 this.count = newCount; // write-volatile 3260 return true; 3261 } 3262 } 3263 3264 return false; 3265 } finally { 3266 unlock(); 3267 postWriteCleanup(); 3268 } 3269 } 3270 3271 /** Removes an entry whose value has been garbage collected. */ 3272 @CanIgnoreReturnValue reclaimValue(K key, int hash, ValueReference<K, V> valueReference)3273 boolean reclaimValue(K key, int hash, ValueReference<K, V> valueReference) { 3274 lock(); 3275 try { 3276 int newCount = this.count - 1; 3277 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 3278 int index = hash & (table.length() - 1); 3279 ReferenceEntry<K, V> first = table.get(index); 3280 3281 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 3282 K entryKey = e.getKey(); 3283 if (e.getHash() == hash 3284 && entryKey != null 3285 && map.keyEquivalence.equivalent(key, entryKey)) { 3286 ValueReference<K, V> v = e.getValueReference(); 3287 if (v == valueReference) { 3288 ++modCount; 3289 ReferenceEntry<K, V> newFirst = 3290 removeValueFromChain( 3291 first, 3292 e, 3293 entryKey, 3294 hash, 3295 valueReference.get(), 3296 valueReference, 3297 RemovalCause.COLLECTED); 3298 newCount = this.count - 1; 3299 table.set(index, newFirst); 3300 this.count = newCount; // write-volatile 3301 return true; 3302 } 3303 return false; 3304 } 3305 } 3306 3307 return false; 3308 } finally { 3309 unlock(); 3310 if (!isHeldByCurrentThread()) { // don't clean up inside of put 3311 postWriteCleanup(); 3312 } 3313 } 3314 } 3315 3316 @CanIgnoreReturnValue removeLoadingValue(K key, int hash, LoadingValueReference<K, V> valueReference)3317 boolean removeLoadingValue(K key, int hash, LoadingValueReference<K, V> valueReference) { 3318 lock(); 3319 try { 3320 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 3321 int index = hash & (table.length() - 1); 3322 ReferenceEntry<K, V> first = table.get(index); 3323 3324 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 3325 K entryKey = e.getKey(); 3326 if (e.getHash() == hash 3327 && entryKey != null 3328 && map.keyEquivalence.equivalent(key, entryKey)) { 3329 ValueReference<K, V> v = e.getValueReference(); 3330 if (v == valueReference) { 3331 if (valueReference.isActive()) { 3332 e.setValueReference(valueReference.getOldValue()); 3333 } else { 3334 ReferenceEntry<K, V> newFirst = removeEntryFromChain(first, e); 3335 table.set(index, newFirst); 3336 } 3337 return true; 3338 } 3339 return false; 3340 } 3341 } 3342 3343 return false; 3344 } finally { 3345 unlock(); 3346 postWriteCleanup(); 3347 } 3348 } 3349 3350 @VisibleForTesting 3351 @GuardedBy("this") 3352 
@CanIgnoreReturnValue removeEntry(ReferenceEntry<K, V> entry, int hash, RemovalCause cause)3353 boolean removeEntry(ReferenceEntry<K, V> entry, int hash, RemovalCause cause) { 3354 int newCount = this.count - 1; 3355 AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table; 3356 int index = hash & (table.length() - 1); 3357 ReferenceEntry<K, V> first = table.get(index); 3358 3359 for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) { 3360 if (e == entry) { 3361 ++modCount; 3362 ReferenceEntry<K, V> newFirst = 3363 removeValueFromChain( 3364 first, 3365 e, 3366 e.getKey(), 3367 hash, 3368 e.getValueReference().get(), 3369 e.getValueReference(), 3370 cause); 3371 newCount = this.count - 1; 3372 table.set(index, newFirst); 3373 this.count = newCount; // write-volatile 3374 return true; 3375 } 3376 } 3377 3378 return false; 3379 } 3380 3381 /** 3382 * Performs routine cleanup following a read. Normally cleanup happens during writes. If cleanup 3383 * is not observed after a sufficient number of reads, try cleaning up from the read thread. 3384 */ postReadCleanup()3385 void postReadCleanup() { 3386 if ((readCount.incrementAndGet() & DRAIN_THRESHOLD) == 0) { 3387 cleanUp(); 3388 } 3389 } 3390 3391 /** 3392 * Performs routine cleanup prior to executing a write. This should be called every time a write 3393 * thread acquires the segment lock, immediately after acquiring the lock. 3394 * 3395 * <p>Post-condition: expireEntries has been run. 3396 */ 3397 @GuardedBy("this") preWriteCleanup(long now)3398 void preWriteCleanup(long now) { 3399 runLockedCleanup(now); 3400 } 3401 3402 /** Performs routine cleanup following a write. */ postWriteCleanup()3403 void postWriteCleanup() { 3404 runUnlockedCleanup(); 3405 } 3406 cleanUp()3407 void cleanUp() { 3408 long now = map.ticker.read(); 3409 runLockedCleanup(now); 3410 runUnlockedCleanup(); 3411 } 3412 runLockedCleanup(long now)3413 void runLockedCleanup(long now) { 3414 if (tryLock()) { 3415 try { 3416 drainReferenceQueues(); 3417 expireEntries(now); // calls drainRecencyQueue 3418 readCount.set(0); 3419 } finally { 3420 unlock(); 3421 } 3422 } 3423 } 3424 runUnlockedCleanup()3425 void runUnlockedCleanup() { 3426 // locked cleanup may generate notifications we can send unlocked 3427 if (!isHeldByCurrentThread()) { 3428 map.processPendingNotifications(); 3429 } 3430 } 3431 } 3432 3433 static class LoadingValueReference<K, V> implements ValueReference<K, V> { 3434 volatile ValueReference<K, V> oldValue; 3435 3436 // TODO(fry): rename get, then extend AbstractFuture instead of containing SettableFuture 3437 final SettableFuture<V> futureValue = SettableFuture.create(); 3438 final Stopwatch stopwatch = Stopwatch.createUnstarted(); 3439 3440 final Thread loadingThread; 3441 LoadingValueReference()3442 public LoadingValueReference() { 3443 this(LocalCache.<K, V>unset()); 3444 } 3445 3446 /* 3447 * TODO(cpovirk): Consider making this implementation closer to the mainline implementation. 3448 * (The difference was introduced as part of Java-8-specific changes in cl/132882204, but we 3449 * could probably make *some* of those changes here in the backport, too.) 

  static class LoadingValueReference<K, V> implements ValueReference<K, V> {
    volatile ValueReference<K, V> oldValue;

    // TODO(fry): rename get, then extend AbstractFuture instead of containing SettableFuture
    final SettableFuture<V> futureValue = SettableFuture.create();
    final Stopwatch stopwatch = Stopwatch.createUnstarted();

    final Thread loadingThread;

    public LoadingValueReference() {
      this(LocalCache.<K, V>unset());
    }

    /*
     * TODO(cpovirk): Consider making this implementation closer to the mainline implementation.
     * (The difference was introduced as part of Java-8-specific changes in cl/132882204, but we
     * could probably make *some* of those changes here in the backport, too.)
     */
    public LoadingValueReference(ValueReference<K, V> oldValue) {
      this.oldValue = oldValue;
      this.loadingThread = Thread.currentThread();
    }

    @Override
    public boolean isLoading() {
      return true;
    }

    @Override
    public boolean isActive() {
      return oldValue.isActive();
    }

    @Override
    public int getWeight() {
      return oldValue.getWeight();
    }

    @CanIgnoreReturnValue
    public boolean set(@CheckForNull V newValue) {
      return futureValue.set(newValue);
    }

    @CanIgnoreReturnValue
    public boolean setException(Throwable t) {
      return futureValue.setException(t);
    }

    private ListenableFuture<V> fullyFailedFuture(Throwable t) {
      return Futures.immediateFailedFuture(t);
    }

    @Override
    public void notifyNewValue(@CheckForNull V newValue) {
      if (newValue != null) {
        // The pending load was clobbered by a manual write.
        // Unblock all pending gets, and have them return the new value.
        set(newValue);
      } else {
        // The pending load was removed. Delay notifications until loading completes.
        oldValue = unset();
      }

      // TODO(fry): could also cancel loading if we had a handle on its future
    }

    public ListenableFuture<V> loadFuture(K key, CacheLoader<? super K, V> loader) {
      try {
        stopwatch.start();
        V previousValue = oldValue.get();
        if (previousValue == null) {
          V newValue = loader.load(key);
          return set(newValue) ? futureValue : Futures.immediateFuture(newValue);
        }
        ListenableFuture<V> newValue = loader.reload(key, previousValue);
        if (newValue == null) {
          return Futures.immediateFuture(null);
        }
        // To avoid a race, make sure the refreshed value is set into loadingValueReference
        // *before* returning newValue from the cache query.
        return transform(
            newValue,
            newResult -> {
              LoadingValueReference.this.set(newResult);
              return newResult;
            },
            directExecutor());
      } catch (Throwable t) {
        ListenableFuture<V> result = setException(t) ? futureValue : fullyFailedFuture(t);
        if (t instanceof InterruptedException) {
          Thread.currentThread().interrupt();
        }
        return result;
      }
    }

    public long elapsedNanos() {
      return stopwatch.elapsed(NANOSECONDS);
    }

    @Override
    public V waitForValue() throws ExecutionException {
      return getUninterruptibly(futureValue);
    }

    @Override
    public V get() {
      return oldValue.get();
    }

    public ValueReference<K, V> getOldValue() {
      return oldValue;
    }

    @Override
    public ReferenceEntry<K, V> getEntry() {
      return null;
    }

    @Override
    public ValueReference<K, V> copyFor(
        ReferenceQueue<V> queue, @CheckForNull V value, ReferenceEntry<K, V> entry) {
      return this;
    }

    Thread getLoadingThread() {
      return this.loadingThread;
    }
  }
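
  /*
   * Illustrative sketch, not referenced by the implementation: LoadingValueReference is what lets
   * concurrent readers of an absent key share a single load. If two threads call get("k") on the
   * cache below at the same time, load runs once; the second thread blocks on the same future
   * (waitForValue) and returns the loaded value. The method below is a hypothetical example, for
   * illustration only.
   */
  @SuppressWarnings("unused")
  private static LoadingCache<String, String> sharedLoadExample() {
    return CacheBuilder.newBuilder()
        .build(
            new CacheLoader<String, String>() {
              @Override
              public String load(String key) {
                return key.toUpperCase(); // stand-in for expensive work
              }
            });
  }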

  // Queues

  /**
   * A custom queue for managing eviction order. Note that this is tightly integrated with {@code
   * ReferenceEntry}, upon which it relies to perform its linking.
   *
   * <p>Note that this entire implementation makes the assumption that all elements which are in the
   * map are also in this queue, and that all elements not in the queue are not in the map.
   *
   * <p>The benefits of creating our own queue are that (1) we can replace elements in the middle of
   * the queue as part of copyWriteEntry, and (2) the contains method is highly optimized for the
   * current model.
   */
  static final class WriteQueue<K, V> extends AbstractQueue<ReferenceEntry<K, V>> {
    final ReferenceEntry<K, V> head =
        new AbstractReferenceEntry<K, V>() {

          @Override
          public long getWriteTime() {
            return Long.MAX_VALUE;
          }

          @Override
          public void setWriteTime(long time) {}

          @Weak ReferenceEntry<K, V> nextWrite = this;

          @Override
          public ReferenceEntry<K, V> getNextInWriteQueue() {
            return nextWrite;
          }

          @Override
          public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
            this.nextWrite = next;
          }

          @Weak ReferenceEntry<K, V> previousWrite = this;

          @Override
          public ReferenceEntry<K, V> getPreviousInWriteQueue() {
            return previousWrite;
          }

          @Override
          public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
            this.previousWrite = previous;
          }
        };

    // implements Queue

    @Override
    public boolean offer(ReferenceEntry<K, V> entry) {
      // unlink
      connectWriteOrder(entry.getPreviousInWriteQueue(), entry.getNextInWriteQueue());

      // add to tail
      connectWriteOrder(head.getPreviousInWriteQueue(), entry);
      connectWriteOrder(entry, head);

      return true;
    }

    @CheckForNull
    @Override
    public ReferenceEntry<K, V> peek() {
      ReferenceEntry<K, V> next = head.getNextInWriteQueue();
      return (next == head) ? null : next;
    }

    @CheckForNull
    @Override
    public ReferenceEntry<K, V> poll() {
      ReferenceEntry<K, V> next = head.getNextInWriteQueue();
      if (next == head) {
        return null;
      }

      remove(next);
      return next;
    }

    @Override
    @SuppressWarnings("unchecked")
    @CanIgnoreReturnValue
    public boolean remove(Object o) {
      ReferenceEntry<K, V> e = (ReferenceEntry<K, V>) o;
      ReferenceEntry<K, V> previous = e.getPreviousInWriteQueue();
      ReferenceEntry<K, V> next = e.getNextInWriteQueue();
      connectWriteOrder(previous, next);
      nullifyWriteOrder(e);

      return next != NullEntry.INSTANCE;
    }

    @Override
    @SuppressWarnings("unchecked")
    public boolean contains(Object o) {
      ReferenceEntry<K, V> e = (ReferenceEntry<K, V>) o;
      return e.getNextInWriteQueue() != NullEntry.INSTANCE;
    }

    @Override
    public boolean isEmpty() {
      return head.getNextInWriteQueue() == head;
    }

    @Override
    public int size() {
      int size = 0;
      for (ReferenceEntry<K, V> e = head.getNextInWriteQueue();
          e != head;
          e = e.getNextInWriteQueue()) {
        size++;
      }
      return size;
    }

    @Override
    public void clear() {
      ReferenceEntry<K, V> e = head.getNextInWriteQueue();
      while (e != head) {
        ReferenceEntry<K, V> next = e.getNextInWriteQueue();
        nullifyWriteOrder(e);
        e = next;
      }

      head.setNextInWriteQueue(head);
      head.setPreviousInWriteQueue(head);
    }

    @Override
    public Iterator<ReferenceEntry<K, V>> iterator() {
      return new AbstractSequentialIterator<ReferenceEntry<K, V>>(peek()) {
        @CheckForNull
        @Override
        protected ReferenceEntry<K, V> computeNext(ReferenceEntry<K, V> previous) {
          ReferenceEntry<K, V> next = previous.getNextInWriteQueue();
          return (next == head) ? null : next;
        }
      };
    }
  }

  /**
   * A custom queue for managing access order. Note that this is tightly integrated with {@code
   * ReferenceEntry}, upon which it relies to perform its linking.
   *
   * <p>Note that this entire implementation makes the assumption that all elements which are in the
   * map are also in this queue, and that all elements not in the queue are not in the map.
   *
   * <p>The benefits of creating our own queue are that (1) we can replace elements in the middle of
   * the queue as part of copyWriteEntry, and (2) the contains method is highly optimized for the
   * current model.
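   *
   * <p>Rough illustration (a sketch; recency is first recorded in a buffer and only applied to
   * this queue when that buffer is drained): reads move entries toward the most-recently-used end,
   * which a size-bounded cache consults when choosing eviction victims.
   *
   * <pre>{@code
   * Cache<String, String> cache = CacheBuilder.newBuilder().maximumSize(1000).build();
   * cache.put("a", "1");
   * cache.getIfPresent("a"); // records an access; on drain, "a" moves to the tail of this queue
   * }</pre>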
3718 */ 3719 static final class AccessQueue<K, V> extends AbstractQueue<ReferenceEntry<K, V>> { 3720 final ReferenceEntry<K, V> head = 3721 new AbstractReferenceEntry<K, V>() { 3722 3723 @Override 3724 public long getAccessTime() { 3725 return Long.MAX_VALUE; 3726 } 3727 3728 @Override 3729 public void setAccessTime(long time) {} 3730 3731 @Weak ReferenceEntry<K, V> nextAccess = this; 3732 3733 @Override 3734 public ReferenceEntry<K, V> getNextInAccessQueue() { 3735 return nextAccess; 3736 } 3737 3738 @Override 3739 public void setNextInAccessQueue(ReferenceEntry<K, V> next) { 3740 this.nextAccess = next; 3741 } 3742 3743 @Weak ReferenceEntry<K, V> previousAccess = this; 3744 3745 @Override 3746 public ReferenceEntry<K, V> getPreviousInAccessQueue() { 3747 return previousAccess; 3748 } 3749 3750 @Override 3751 public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) { 3752 this.previousAccess = previous; 3753 } 3754 }; 3755 3756 // implements Queue 3757 3758 @Override 3759 public boolean offer(ReferenceEntry<K, V> entry) { 3760 // unlink 3761 connectAccessOrder(entry.getPreviousInAccessQueue(), entry.getNextInAccessQueue()); 3762 3763 // add to tail 3764 connectAccessOrder(head.getPreviousInAccessQueue(), entry); 3765 connectAccessOrder(entry, head); 3766 3767 return true; 3768 } 3769 3770 @CheckForNull 3771 @Override 3772 public ReferenceEntry<K, V> peek() { 3773 ReferenceEntry<K, V> next = head.getNextInAccessQueue(); 3774 return (next == head) ? null : next; 3775 } 3776 3777 @CheckForNull 3778 @Override 3779 public ReferenceEntry<K, V> poll() { 3780 ReferenceEntry<K, V> next = head.getNextInAccessQueue(); 3781 if (next == head) { 3782 return null; 3783 } 3784 3785 remove(next); 3786 return next; 3787 } 3788 3789 @Override 3790 @SuppressWarnings("unchecked") 3791 @CanIgnoreReturnValue 3792 public boolean remove(Object o) { 3793 ReferenceEntry<K, V> e = (ReferenceEntry<K, V>) o; 3794 ReferenceEntry<K, V> previous = e.getPreviousInAccessQueue(); 3795 ReferenceEntry<K, V> next = e.getNextInAccessQueue(); 3796 connectAccessOrder(previous, next); 3797 nullifyAccessOrder(e); 3798 3799 return next != NullEntry.INSTANCE; 3800 } 3801 3802 @Override 3803 @SuppressWarnings("unchecked") 3804 public boolean contains(Object o) { 3805 ReferenceEntry<K, V> e = (ReferenceEntry<K, V>) o; 3806 return e.getNextInAccessQueue() != NullEntry.INSTANCE; 3807 } 3808 3809 @Override 3810 public boolean isEmpty() { 3811 return head.getNextInAccessQueue() == head; 3812 } 3813 3814 @Override 3815 public int size() { 3816 int size = 0; 3817 for (ReferenceEntry<K, V> e = head.getNextInAccessQueue(); 3818 e != head; 3819 e = e.getNextInAccessQueue()) { 3820 size++; 3821 } 3822 return size; 3823 } 3824 3825 @Override 3826 public void clear() { 3827 ReferenceEntry<K, V> e = head.getNextInAccessQueue(); 3828 while (e != head) { 3829 ReferenceEntry<K, V> next = e.getNextInAccessQueue(); 3830 nullifyAccessOrder(e); 3831 e = next; 3832 } 3833 3834 head.setNextInAccessQueue(head); 3835 head.setPreviousInAccessQueue(head); 3836 } 3837 3838 @Override 3839 public Iterator<ReferenceEntry<K, V>> iterator() { 3840 return new AbstractSequentialIterator<ReferenceEntry<K, V>>(peek()) { 3841 @CheckForNull 3842 @Override 3843 protected ReferenceEntry<K, V> computeNext(ReferenceEntry<K, V> previous) { 3844 ReferenceEntry<K, V> next = previous.getNextInAccessQueue(); 3845 return (next == head) ? 
null : next; 3846 } 3847 }; 3848 } 3849 } 3850 3851 // Cache support 3852 3853 public void cleanUp() { 3854 for (Segment<?, ?> segment : segments) { 3855 segment.cleanUp(); 3856 } 3857 } 3858 3859 // ConcurrentMap methods 3860 3861 @Override 3862 public boolean isEmpty() { 3863 /* 3864 * Sum per-segment modCounts to avoid mis-reporting when elements are concurrently added and 3865 * removed in one segment while checking another, in which case the table was never actually 3866 * empty at any point. (The sum ensures accuracy up through at least 1<<31 per-segment 3867 * modifications before recheck.) Method containsValue() uses similar constructions for 3868 * stability checks. 3869 */ 3870 long sum = 0L; 3871 Segment<K, V>[] segments = this.segments; 3872 for (Segment<K, V> segment : segments) { 3873 if (segment.count != 0) { 3874 return false; 3875 } 3876 sum += segment.modCount; 3877 } 3878 3879 if (sum != 0L) { // recheck unless no modifications 3880 for (Segment<K, V> segment : segments) { 3881 if (segment.count != 0) { 3882 return false; 3883 } 3884 sum -= segment.modCount; 3885 } 3886 return sum == 0L; 3887 } 3888 return true; 3889 } 3890 3891 long longSize() { 3892 Segment<K, V>[] segments = this.segments; 3893 long sum = 0; 3894 for (Segment<K, V> segment : segments) { 3895 sum += Math.max(0, segment.count); // see https://github.com/google/guava/issues/2108 3896 } 3897 return sum; 3898 } 3899 3900 @Override 3901 public int size() { 3902 return Ints.saturatedCast(longSize()); 3903 } 3904 3905 @CanIgnoreReturnValue // TODO(b/27479612): consider removing this 3906 @Override 3907 @CheckForNull 3908 public V get(@CheckForNull Object key) { 3909 if (key == null) { 3910 return null; 3911 } 3912 int hash = hash(key); 3913 return segmentFor(hash).get(key, hash); 3914 } 3915 3916 @CanIgnoreReturnValue // TODO(b/27479612): consider removing this 3917 V get(K key, CacheLoader<? super K, V> loader) throws ExecutionException { 3918 int hash = hash(checkNotNull(key)); 3919 return segmentFor(hash).get(key, hash, loader); 3920 } 3921 3922 @CheckForNull 3923 public V getIfPresent(Object key) { 3924 int hash = hash(checkNotNull(key)); 3925 V value = segmentFor(hash).get(key, hash); 3926 if (value == null) { 3927 globalStatsCounter.recordMisses(1); 3928 } else { 3929 globalStatsCounter.recordHits(1); 3930 } 3931 return value; 3932 } 3933 3934 @Override 3935 @CheckForNull 3936 public V getOrDefault(@CheckForNull Object key, @CheckForNull V defaultValue) { 3937 V result = get(key); 3938 return (result != null) ? result : defaultValue; 3939 } 3940 3941 V getOrLoad(K key) throws ExecutionException { 3942 return get(key, defaultLoader); 3943 } 3944 3945 ImmutableMap<K, V> getAllPresent(Iterable<?> keys) { 3946 int hits = 0; 3947 int misses = 0; 3948 3949 ImmutableMap.Builder<K, V> result = ImmutableMap.builder(); 3950 for (Object key : keys) { 3951 V value = get(key); 3952 if (value == null) { 3953 misses++; 3954 } else { 3955 // TODO(fry): store entry key instead of query key 3956 @SuppressWarnings("unchecked") 3957 K castKey = (K) key; 3958 result.put(castKey, value); 3959 hits++; 3960 } 3961 } 3962 globalStatsCounter.recordHits(hits); 3963 globalStatsCounter.recordMisses(misses); 3964 return result.buildKeepingLast(); 3965 } 3966 3967 ImmutableMap<K, V> getAll(Iterable<? 
extends K> keys) throws ExecutionException { 3968 int hits = 0; 3969 int misses = 0; 3970 3971 Map<K, V> result = Maps.newLinkedHashMap(); 3972 Set<K> keysToLoad = Sets.newLinkedHashSet(); 3973 for (K key : keys) { 3974 V value = get(key); 3975 if (!result.containsKey(key)) { 3976 result.put(key, value); 3977 if (value == null) { 3978 misses++; 3979 keysToLoad.add(key); 3980 } else { 3981 hits++; 3982 } 3983 } 3984 } 3985 3986 try { 3987 if (!keysToLoad.isEmpty()) { 3988 try { 3989 Map<K, V> newEntries = loadAll(unmodifiableSet(keysToLoad), defaultLoader); 3990 for (K key : keysToLoad) { 3991 V value = newEntries.get(key); 3992 if (value == null) { 3993 throw new InvalidCacheLoadException("loadAll failed to return a value for " + key); 3994 } 3995 result.put(key, value); 3996 } 3997 } catch (UnsupportedLoadingOperationException e) { 3998 // loadAll not implemented, fallback to load 3999 for (K key : keysToLoad) { 4000 misses--; // get will count this miss 4001 result.put(key, get(key, defaultLoader)); 4002 } 4003 } 4004 } 4005 return ImmutableMap.copyOf(result); 4006 } finally { 4007 globalStatsCounter.recordHits(hits); 4008 globalStatsCounter.recordMisses(misses); 4009 } 4010 } 4011 4012 /** 4013 * Returns the result of calling {@link CacheLoader#loadAll}, or null if {@code loader} doesn't 4014 * implement {@code loadAll}. 4015 */ 4016 @CheckForNull 4017 Map<K, V> loadAll(Set<? extends K> keys, CacheLoader<? super K, V> loader) 4018 throws ExecutionException { 4019 checkNotNull(loader); 4020 checkNotNull(keys); 4021 Stopwatch stopwatch = Stopwatch.createStarted(); 4022 Map<K, V> result; 4023 boolean success = false; 4024 try { 4025 @SuppressWarnings("unchecked") // safe since all keys extend K 4026 Map<K, V> map = (Map<K, V>) loader.loadAll(keys); 4027 result = map; 4028 success = true; 4029 } catch (UnsupportedLoadingOperationException e) { 4030 success = true; 4031 throw e; 4032 } catch (InterruptedException e) { 4033 Thread.currentThread().interrupt(); 4034 throw new ExecutionException(e); 4035 } catch (RuntimeException e) { 4036 throw new UncheckedExecutionException(e); 4037 } catch (Exception e) { 4038 throw new ExecutionException(e); 4039 } catch (Error e) { 4040 throw new ExecutionError(e); 4041 } finally { 4042 if (!success) { 4043 globalStatsCounter.recordLoadException(stopwatch.elapsed(NANOSECONDS)); 4044 } 4045 } 4046 4047 if (result == null) { 4048 globalStatsCounter.recordLoadException(stopwatch.elapsed(NANOSECONDS)); 4049 throw new InvalidCacheLoadException(loader + " returned null map from loadAll"); 4050 } 4051 4052 stopwatch.stop(); 4053 // TODO(fry): batch by segment 4054 boolean nullsPresent = false; 4055 for (Entry<K, V> entry : result.entrySet()) { 4056 K key = entry.getKey(); 4057 V value = entry.getValue(); 4058 if (key == null || value == null) { 4059 // delay failure until non-null entries are stored 4060 nullsPresent = true; 4061 } else { 4062 put(key, value); 4063 } 4064 } 4065 4066 if (nullsPresent) { 4067 globalStatsCounter.recordLoadException(stopwatch.elapsed(NANOSECONDS)); 4068 throw new InvalidCacheLoadException(loader + " returned null keys or values from loadAll"); 4069 } 4070 4071 // TODO(fry): record count of loaded entries 4072 globalStatsCounter.recordLoadSuccess(stopwatch.elapsed(NANOSECONDS)); 4073 return result; 4074 } 4075 4076 /** 4077 * Returns the internal entry for the specified key. The entry may be loading, expired, or 4078 * partially collected. 
4079 */ 4080 @CheckForNull 4081 ReferenceEntry<K, V> getEntry(@CheckForNull Object key) { 4082 // does not impact recency ordering 4083 if (key == null) { 4084 return null; 4085 } 4086 int hash = hash(key); 4087 return segmentFor(hash).getEntry(key, hash); 4088 } 4089 4090 void refresh(K key) { 4091 int hash = hash(checkNotNull(key)); 4092 segmentFor(hash).refresh(key, hash, defaultLoader, false); 4093 } 4094 4095 @Override 4096 public boolean containsKey(@CheckForNull Object key) { 4097 // does not impact recency ordering 4098 if (key == null) { 4099 return false; 4100 } 4101 int hash = hash(key); 4102 return segmentFor(hash).containsKey(key, hash); 4103 } 4104 4105 @Override 4106 public boolean containsValue(@CheckForNull Object value) { 4107 // does not impact recency ordering 4108 if (value == null) { 4109 return false; 4110 } 4111 4112 // This implementation is patterned after ConcurrentHashMap, but without the locking. The only 4113 // way for it to return a false negative would be for the target value to jump around in the map 4114 // such that none of the subsequent iterations observed it, despite the fact that at every point 4115 // in time it was present somewhere int the map. This becomes increasingly unlikely as 4116 // CONTAINS_VALUE_RETRIES increases, though without locking it is theoretically possible. 4117 long now = ticker.read(); 4118 final Segment<K, V>[] segments = this.segments; 4119 long last = -1L; 4120 for (int i = 0; i < CONTAINS_VALUE_RETRIES; i++) { 4121 long sum = 0L; 4122 for (Segment<K, V> segment : segments) { 4123 // ensure visibility of most recent completed write 4124 int unused = segment.count; // read-volatile 4125 4126 AtomicReferenceArray<ReferenceEntry<K, V>> table = segment.table; 4127 for (int j = 0; j < table.length(); j++) { 4128 for (ReferenceEntry<K, V> e = table.get(j); e != null; e = e.getNext()) { 4129 V v = segment.getLiveValue(e, now); 4130 if (v != null && valueEquivalence.equivalent(value, v)) { 4131 return true; 4132 } 4133 } 4134 } 4135 sum += segment.modCount; 4136 } 4137 if (sum == last) { 4138 break; 4139 } 4140 last = sum; 4141 } 4142 return false; 4143 } 4144 4145 @CheckForNull 4146 @CanIgnoreReturnValue 4147 @Override 4148 public V put(K key, V value) { 4149 checkNotNull(key); 4150 checkNotNull(value); 4151 int hash = hash(key); 4152 return segmentFor(hash).put(key, hash, value, false); 4153 } 4154 4155 @CheckForNull 4156 @Override 4157 public V putIfAbsent(K key, V value) { 4158 checkNotNull(key); 4159 checkNotNull(value); 4160 int hash = hash(key); 4161 return segmentFor(hash).put(key, hash, value, true); 4162 } 4163 4164 @Override 4165 public void putAll(Map<? extends K, ? extends V> m) { 4166 for (Entry<? extends K, ? 
extends V> e : m.entrySet()) { 4167 put(e.getKey(), e.getValue()); 4168 } 4169 } 4170 4171 @CheckForNull 4172 @CanIgnoreReturnValue 4173 @Override 4174 public V remove(@CheckForNull Object key) { 4175 if (key == null) { 4176 return null; 4177 } 4178 int hash = hash(key); 4179 return segmentFor(hash).remove(key, hash); 4180 } 4181 4182 @CanIgnoreReturnValue 4183 @Override 4184 public boolean remove(@CheckForNull Object key, @CheckForNull Object value) { 4185 if (key == null || value == null) { 4186 return false; 4187 } 4188 int hash = hash(key); 4189 return segmentFor(hash).remove(key, hash, value); 4190 } 4191 4192 @CanIgnoreReturnValue 4193 @Override 4194 public boolean replace(K key, @CheckForNull V oldValue, V newValue) { 4195 checkNotNull(key); 4196 checkNotNull(newValue); 4197 if (oldValue == null) { 4198 return false; 4199 } 4200 int hash = hash(key); 4201 return segmentFor(hash).replace(key, hash, oldValue, newValue); 4202 } 4203 4204 @CheckForNull 4205 @CanIgnoreReturnValue 4206 @Override 4207 public V replace(K key, V value) { 4208 checkNotNull(key); 4209 checkNotNull(value); 4210 int hash = hash(key); 4211 return segmentFor(hash).replace(key, hash, value); 4212 } 4213 4214 @Override 4215 public void clear() { 4216 for (Segment<K, V> segment : segments) { 4217 segment.clear(); 4218 } 4219 } 4220 4221 void invalidateAll(Iterable<?> keys) { 4222 // TODO(fry): batch by segment 4223 for (Object key : keys) { 4224 remove(key); 4225 } 4226 } 4227 4228 @LazyInit @RetainedWith @CheckForNull Set<K> keySet; 4229 4230 @Override 4231 public Set<K> keySet() { 4232 // does not impact recency ordering 4233 Set<K> ks = keySet; 4234 return (ks != null) ? ks : (keySet = new KeySet()); 4235 } 4236 4237 @LazyInit @RetainedWith @CheckForNull Collection<V> values; 4238 4239 @Override 4240 public Collection<V> values() { 4241 // does not impact recency ordering 4242 Collection<V> vs = values; 4243 return (vs != null) ? vs : (values = new Values()); 4244 } 4245 4246 @LazyInit @RetainedWith @CheckForNull Set<Entry<K, V>> entrySet; 4247 4248 @Override 4249 @GwtIncompatible // Not supported. 4250 public Set<Entry<K, V>> entrySet() { 4251 // does not impact recency ordering 4252 Set<Entry<K, V>> es = entrySet; 4253 return (es != null) ? es : (entrySet = new EntrySet()); 4254 } 4255 4256 // Iterator Support 4257 4258 abstract class HashIterator<T> implements Iterator<T> { 4259 4260 int nextSegmentIndex; 4261 int nextTableIndex; 4262 @CheckForNull Segment<K, V> currentSegment; 4263 @CheckForNull AtomicReferenceArray<ReferenceEntry<K, V>> currentTable; 4264 @CheckForNull ReferenceEntry<K, V> nextEntry; 4265 @CheckForNull WriteThroughEntry nextExternal; 4266 @CheckForNull WriteThroughEntry lastReturned; 4267 4268 HashIterator() { 4269 nextSegmentIndex = segments.length - 1; 4270 nextTableIndex = -1; 4271 advance(); 4272 } 4273 4274 @Override 4275 public abstract T next(); 4276 4277 final void advance() { 4278 nextExternal = null; 4279 4280 if (nextInChain()) { 4281 return; 4282 } 4283 4284 if (nextInTable()) { 4285 return; 4286 } 4287 4288 while (nextSegmentIndex >= 0) { 4289 currentSegment = segments[nextSegmentIndex--]; 4290 if (currentSegment.count != 0) { 4291 currentTable = currentSegment.table; 4292 nextTableIndex = currentTable.length() - 1; 4293 if (nextInTable()) { 4294 return; 4295 } 4296 } 4297 } 4298 } 4299 4300 /** Finds the next entry in the current chain. Returns true if an entry was found. 
*/ 4301 boolean nextInChain() { 4302 if (nextEntry != null) { 4303 for (nextEntry = nextEntry.getNext(); nextEntry != null; nextEntry = nextEntry.getNext()) { 4304 if (advanceTo(nextEntry)) { 4305 return true; 4306 } 4307 } 4308 } 4309 return false; 4310 } 4311 4312 /** Finds the next entry in the current table. Returns true if an entry was found. */ 4313 boolean nextInTable() { 4314 while (nextTableIndex >= 0) { 4315 if ((nextEntry = currentTable.get(nextTableIndex--)) != null) { 4316 if (advanceTo(nextEntry) || nextInChain()) { 4317 return true; 4318 } 4319 } 4320 } 4321 return false; 4322 } 4323 4324 /** 4325 * Advances to the given entry. Returns true if the entry was valid, false if it should be 4326 * skipped. 4327 */ 4328 boolean advanceTo(ReferenceEntry<K, V> entry) { 4329 try { 4330 long now = ticker.read(); 4331 K key = entry.getKey(); 4332 V value = getLiveValue(entry, now); 4333 if (value != null) { 4334 nextExternal = new WriteThroughEntry(key, value); 4335 return true; 4336 } else { 4337 // Skip stale entry. 4338 return false; 4339 } 4340 } finally { 4341 currentSegment.postReadCleanup(); 4342 } 4343 } 4344 4345 @Override 4346 public boolean hasNext() { 4347 return nextExternal != null; 4348 } 4349 4350 WriteThroughEntry nextEntry() { 4351 if (nextExternal == null) { 4352 throw new NoSuchElementException(); 4353 } 4354 lastReturned = nextExternal; 4355 advance(); 4356 return lastReturned; 4357 } 4358 4359 @Override 4360 public void remove() { 4361 checkState(lastReturned != null); 4362 LocalCache.this.remove(lastReturned.getKey()); 4363 lastReturned = null; 4364 } 4365 } 4366 4367 final class KeyIterator extends HashIterator<K> { 4368 4369 @Override 4370 public K next() { 4371 return nextEntry().getKey(); 4372 } 4373 } 4374 4375 final class ValueIterator extends HashIterator<V> { 4376 4377 @Override 4378 public V next() { 4379 return nextEntry().getValue(); 4380 } 4381 } 4382 4383 /** 4384 * Custom Entry class used by EntryIterator.next(), that relays setValue changes to the underlying 4385 * map. 
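   *
   * <p>Observable effect, as a sketch:
   *
   * <pre>{@code
   * Cache<String, Integer> cache = CacheBuilder.newBuilder().build();
   * cache.put("a", 1);
   * for (Map.Entry<String, Integer> entry : cache.asMap().entrySet()) {
   *   entry.setValue(entry.getValue() + 1); // behaves like cache.put(entry.getKey(), 2)
   * }
   * }</pre>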
4386 */ 4387 final class WriteThroughEntry implements Entry<K, V> { 4388 final K key; // non-null 4389 V value; // non-null 4390 4391 WriteThroughEntry(K key, V value) { 4392 this.key = key; 4393 this.value = value; 4394 } 4395 4396 @Override 4397 public K getKey() { 4398 return key; 4399 } 4400 4401 @Override 4402 public V getValue() { 4403 return value; 4404 } 4405 4406 @Override 4407 public boolean equals(@CheckForNull Object object) { 4408 // Cannot use key and value equivalence 4409 if (object instanceof Entry) { 4410 Entry<?, ?> that = (Entry<?, ?>) object; 4411 return key.equals(that.getKey()) && value.equals(that.getValue()); 4412 } 4413 return false; 4414 } 4415 4416 @Override 4417 public int hashCode() { 4418 // Cannot use key and value equivalence 4419 return key.hashCode() ^ value.hashCode(); 4420 } 4421 4422 @Override 4423 public V setValue(V newValue) { 4424 V oldValue = put(key, newValue); 4425 value = newValue; // only if put succeeds 4426 return oldValue; 4427 } 4428 4429 @Override 4430 public String toString() { 4431 return getKey() + "=" + getValue(); 4432 } 4433 } 4434 4435 final class EntryIterator extends HashIterator<Entry<K, V>> { 4436 4437 @Override 4438 public Entry<K, V> next() { 4439 return nextEntry(); 4440 } 4441 } 4442 4443 abstract class AbstractCacheSet<T> extends AbstractSet<T> { 4444 @Override 4445 public int size() { 4446 return LocalCache.this.size(); 4447 } 4448 4449 @Override 4450 public boolean isEmpty() { 4451 return LocalCache.this.isEmpty(); 4452 } 4453 4454 @Override 4455 public void clear() { 4456 LocalCache.this.clear(); 4457 } 4458 4459 // super.toArray() may misbehave if size() is inaccurate, at least on old versions of Android. 4460 // https://code.google.com/p/android/issues/detail?id=36519 / http://r.android.com/47508 4461 4462 @Override 4463 public Object[] toArray() { 4464 return toArrayList(this).toArray(); 4465 } 4466 4467 @Override 4468 public <E> E[] toArray(E[] a) { 4469 return toArrayList(this).toArray(a); 4470 } 4471 } 4472 4473 private static <E> ArrayList<E> toArrayList(Collection<E> c) { 4474 // Avoid calling ArrayList(Collection), which may call back into toArray. 4475 ArrayList<E> result = new ArrayList<>(c.size()); 4476 Iterators.addAll(result, c.iterator()); 4477 return result; 4478 } 4479 4480 final class KeySet extends AbstractCacheSet<K> { 4481 4482 @Override 4483 public Iterator<K> iterator() { 4484 return new KeyIterator(); 4485 } 4486 4487 @Override 4488 public boolean contains(Object o) { 4489 return LocalCache.this.containsKey(o); 4490 } 4491 4492 @Override 4493 public boolean remove(Object o) { 4494 return LocalCache.this.remove(o) != null; 4495 } 4496 } 4497 4498 final class Values extends AbstractCollection<V> { 4499 @Override 4500 public int size() { 4501 return LocalCache.this.size(); 4502 } 4503 4504 @Override 4505 public boolean isEmpty() { 4506 return LocalCache.this.isEmpty(); 4507 } 4508 4509 @Override 4510 public void clear() { 4511 LocalCache.this.clear(); 4512 } 4513 4514 @Override 4515 public Iterator<V> iterator() { 4516 return new ValueIterator(); 4517 } 4518 4519 @Override 4520 public boolean contains(Object o) { 4521 return LocalCache.this.containsValue(o); 4522 } 4523 4524 // super.toArray() may misbehave if size() is inaccurate, at least on old versions of Android. 
4525 // https://code.google.com/p/android/issues/detail?id=36519 / http://r.android.com/47508 4526 4527 @Override 4528 public Object[] toArray() { 4529 return toArrayList(this).toArray(); 4530 } 4531 4532 @Override 4533 public <E> E[] toArray(E[] a) { 4534 return toArrayList(this).toArray(a); 4535 } 4536 } 4537 4538 final class EntrySet extends AbstractCacheSet<Entry<K, V>> { 4539 4540 @Override 4541 public Iterator<Entry<K, V>> iterator() { 4542 return new EntryIterator(); 4543 } 4544 4545 @Override 4546 public boolean contains(Object o) { 4547 if (!(o instanceof Entry)) { 4548 return false; 4549 } 4550 Entry<?, ?> e = (Entry<?, ?>) o; 4551 Object key = e.getKey(); 4552 if (key == null) { 4553 return false; 4554 } 4555 V v = LocalCache.this.get(key); 4556 4557 return v != null && valueEquivalence.equivalent(e.getValue(), v); 4558 } 4559 4560 @Override 4561 public boolean remove(Object o) { 4562 if (!(o instanceof Entry)) { 4563 return false; 4564 } 4565 Entry<?, ?> e = (Entry<?, ?>) o; 4566 Object key = e.getKey(); 4567 return key != null && LocalCache.this.remove(key, e.getValue()); 4568 } 4569 } 4570 4571 // Serialization Support 4572 4573 /** 4574 * Serializes the configuration of a LocalCache, reconstituting it as a Cache using CacheBuilder 4575 * upon deserialization. An instance of this class is fit for use by the writeReplace of 4576 * LocalManualCache. 4577 * 4578 * <p>Unfortunately, readResolve() doesn't get called when a circular dependency is present, so 4579 * the proxy must be able to behave as the cache itself. 4580 */ 4581 static class ManualSerializationProxy<K, V> extends ForwardingCache<K, V> 4582 implements Serializable { 4583 private static final long serialVersionUID = 1; 4584 4585 final Strength keyStrength; 4586 final Strength valueStrength; 4587 final Equivalence<Object> keyEquivalence; 4588 final Equivalence<Object> valueEquivalence; 4589 final long expireAfterWriteNanos; 4590 final long expireAfterAccessNanos; 4591 final long maxWeight; 4592 final Weigher<K, V> weigher; 4593 final int concurrencyLevel; 4594 final RemovalListener<? super K, ? super V> removalListener; 4595 @CheckForNull final Ticker ticker; 4596 final CacheLoader<? super K, V> loader; 4597 4598 @CheckForNull transient Cache<K, V> delegate; 4599 4600 ManualSerializationProxy(LocalCache<K, V> cache) { 4601 this( 4602 cache.keyStrength, 4603 cache.valueStrength, 4604 cache.keyEquivalence, 4605 cache.valueEquivalence, 4606 cache.expireAfterWriteNanos, 4607 cache.expireAfterAccessNanos, 4608 cache.maxWeight, 4609 cache.weigher, 4610 cache.concurrencyLevel, 4611 cache.removalListener, 4612 cache.ticker, 4613 cache.defaultLoader); 4614 } 4615 4616 private ManualSerializationProxy( 4617 Strength keyStrength, 4618 Strength valueStrength, 4619 Equivalence<Object> keyEquivalence, 4620 Equivalence<Object> valueEquivalence, 4621 long expireAfterWriteNanos, 4622 long expireAfterAccessNanos, 4623 long maxWeight, 4624 Weigher<K, V> weigher, 4625 int concurrencyLevel, 4626 RemovalListener<? super K, ? super V> removalListener, 4627 Ticker ticker, 4628 CacheLoader<? 
super K, V> loader) { 4629 this.keyStrength = keyStrength; 4630 this.valueStrength = valueStrength; 4631 this.keyEquivalence = keyEquivalence; 4632 this.valueEquivalence = valueEquivalence; 4633 this.expireAfterWriteNanos = expireAfterWriteNanos; 4634 this.expireAfterAccessNanos = expireAfterAccessNanos; 4635 this.maxWeight = maxWeight; 4636 this.weigher = weigher; 4637 this.concurrencyLevel = concurrencyLevel; 4638 this.removalListener = removalListener; 4639 this.ticker = (ticker == Ticker.systemTicker() || ticker == NULL_TICKER) ? null : ticker; 4640 this.loader = loader; 4641 } 4642 4643 CacheBuilder<K, V> recreateCacheBuilder() { 4644 CacheBuilder<K, V> builder = 4645 CacheBuilder.newBuilder() 4646 .setKeyStrength(keyStrength) 4647 .setValueStrength(valueStrength) 4648 .keyEquivalence(keyEquivalence) 4649 .valueEquivalence(valueEquivalence) 4650 .concurrencyLevel(concurrencyLevel) 4651 .removalListener(removalListener); 4652 builder.strictParsing = false; 4653 if (expireAfterWriteNanos > 0) { 4654 builder.expireAfterWrite(expireAfterWriteNanos, TimeUnit.NANOSECONDS); 4655 } 4656 if (expireAfterAccessNanos > 0) { 4657 builder.expireAfterAccess(expireAfterAccessNanos, TimeUnit.NANOSECONDS); 4658 } 4659 if (weigher != OneWeigher.INSTANCE) { 4660 Object unused = builder.weigher(weigher); 4661 if (maxWeight != UNSET_INT) { 4662 builder.maximumWeight(maxWeight); 4663 } 4664 } else { 4665 if (maxWeight != UNSET_INT) { 4666 builder.maximumSize(maxWeight); 4667 } 4668 } 4669 if (ticker != null) { 4670 builder.ticker(ticker); 4671 } 4672 return builder; 4673 } 4674 4675 private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { 4676 in.defaultReadObject(); 4677 CacheBuilder<K, V> builder = recreateCacheBuilder(); 4678 this.delegate = builder.build(); 4679 } 4680 4681 private Object readResolve() { 4682 return delegate; 4683 } 4684 4685 @Override 4686 protected Cache<K, V> delegate() { 4687 return delegate; 4688 } 4689 } 4690 4691 /** 4692 * Serializes the configuration of a LocalCache, reconstituting it as an LoadingCache using 4693 * CacheBuilder upon deserialization. An instance of this class is fit for use by the writeReplace 4694 * of LocalLoadingCache. 4695 * 4696 * <p>Unfortunately, readResolve() doesn't get called when a circular dependency is present, so 4697 * the proxy must be able to behave as the cache itself. 4698 */ 4699 static final class LoadingSerializationProxy<K, V> extends ManualSerializationProxy<K, V> 4700 implements LoadingCache<K, V>, Serializable { 4701 private static final long serialVersionUID = 1; 4702 4703 @CheckForNull transient LoadingCache<K, V> autoDelegate; 4704 4705 LoadingSerializationProxy(LocalCache<K, V> cache) { 4706 super(cache); 4707 } 4708 4709 private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { 4710 in.defaultReadObject(); 4711 CacheBuilder<K, V> builder = recreateCacheBuilder(); 4712 this.autoDelegate = builder.build(loader); 4713 } 4714 4715 @Override 4716 public V get(K key) throws ExecutionException { 4717 return autoDelegate.get(key); 4718 } 4719 4720 @Override 4721 public V getUnchecked(K key) { 4722 return autoDelegate.getUnchecked(key); 4723 } 4724 4725 @Override 4726 public ImmutableMap<K, V> getAll(Iterable<? 
extends K> keys) throws ExecutionException { 4727 return autoDelegate.getAll(keys); 4728 } 4729 4730 @Override 4731 public V apply(K key) { 4732 return autoDelegate.apply(key); 4733 } 4734 4735 @Override 4736 public void refresh(K key) { 4737 autoDelegate.refresh(key); 4738 } 4739 4740 private Object readResolve() { 4741 return autoDelegate; 4742 } 4743 } 4744 4745 static class LocalManualCache<K, V> implements Cache<K, V>, Serializable { 4746 final LocalCache<K, V> localCache; 4747 4748 LocalManualCache(CacheBuilder<? super K, ? super V> builder) { 4749 this(new LocalCache<>(builder, null)); 4750 } 4751 4752 private LocalManualCache(LocalCache<K, V> localCache) { 4753 this.localCache = localCache; 4754 } 4755 4756 // Cache methods 4757 4758 @Override 4759 @CheckForNull 4760 public V getIfPresent(Object key) { 4761 return localCache.getIfPresent(key); 4762 } 4763 4764 @Override 4765 public V get(K key, final Callable<? extends V> valueLoader) throws ExecutionException { 4766 checkNotNull(valueLoader); 4767 return localCache.get( 4768 key, 4769 new CacheLoader<Object, V>() { 4770 @Override 4771 public V load(Object key) throws Exception { 4772 return valueLoader.call(); 4773 } 4774 }); 4775 } 4776 4777 @Override 4778 public ImmutableMap<K, V> getAllPresent(Iterable<?> keys) { 4779 return localCache.getAllPresent(keys); 4780 } 4781 4782 @Override 4783 public void put(K key, V value) { 4784 localCache.put(key, value); 4785 } 4786 4787 @Override 4788 public void putAll(Map<? extends K, ? extends V> m) { 4789 localCache.putAll(m); 4790 } 4791 4792 @Override 4793 public void invalidate(Object key) { 4794 checkNotNull(key); 4795 localCache.remove(key); 4796 } 4797 4798 @Override 4799 public void invalidateAll(Iterable<?> keys) { 4800 localCache.invalidateAll(keys); 4801 } 4802 4803 @Override 4804 public void invalidateAll() { 4805 localCache.clear(); 4806 } 4807 4808 @Override 4809 public long size() { 4810 return localCache.longSize(); 4811 } 4812 4813 @Override 4814 public ConcurrentMap<K, V> asMap() { 4815 return localCache; 4816 } 4817 4818 @Override 4819 public CacheStats stats() { 4820 SimpleStatsCounter aggregator = new SimpleStatsCounter(); 4821 aggregator.incrementBy(localCache.globalStatsCounter); 4822 for (Segment<K, V> segment : localCache.segments) { 4823 aggregator.incrementBy(segment.statsCounter); 4824 } 4825 return aggregator.snapshot(); 4826 } 4827 4828 @Override 4829 public void cleanUp() { 4830 localCache.cleanUp(); 4831 } 4832 4833 // Serialization Support 4834 4835 private static final long serialVersionUID = 1; 4836 4837 Object writeReplace() { 4838 return new ManualSerializationProxy<>(localCache); 4839 } 4840 4841 private void readObject(ObjectInputStream in) throws InvalidObjectException { 4842 throw new InvalidObjectException("Use ManualSerializationProxy"); 4843 } 4844 } 4845 4846 static class LocalLoadingCache<K, V> extends LocalManualCache<K, V> 4847 implements LoadingCache<K, V> { 4848 4849 LocalLoadingCache( 4850 CacheBuilder<? super K, ? super V> builder, CacheLoader<? 
super K, V> loader) { 4851 super(new LocalCache<>(builder, checkNotNull(loader))); 4852 } 4853 4854 // LoadingCache methods 4855 4856 @Override 4857 public V get(K key) throws ExecutionException { 4858 return localCache.getOrLoad(key); 4859 } 4860 4861 @CanIgnoreReturnValue // TODO(b/27479612): consider removing this 4862 @Override 4863 public V getUnchecked(K key) { 4864 try { 4865 return get(key); 4866 } catch (ExecutionException e) { 4867 throw new UncheckedExecutionException(e.getCause()); 4868 } 4869 } 4870 4871 @Override 4872 public ImmutableMap<K, V> getAll(Iterable<? extends K> keys) throws ExecutionException { 4873 return localCache.getAll(keys); 4874 } 4875 4876 @Override 4877 public void refresh(K key) { 4878 localCache.refresh(key); 4879 } 4880 4881 @Override 4882 public final V apply(K key) { 4883 return getUnchecked(key); 4884 } 4885 4886 // Serialization Support 4887 4888 private static final long serialVersionUID = 1; 4889 4890 @Override 4891 Object writeReplace() { 4892 return new LoadingSerializationProxy<>(localCache); 4893 } 4894 4895 private void readObject(ObjectInputStream in) throws InvalidObjectException { 4896 throw new InvalidObjectException("Use LoadingSerializationProxy"); 4897 } 4898 } 4899 } 4900
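
/*
 * Serialization, as a sketch of the proxies above: writeReplace() substitutes a
 * ManualSerializationProxy (or LoadingSerializationProxy), which captures only the cache's
 * configuration. Deserialization rebuilds an empty cache from a reconstructed CacheBuilder, so
 * configuration survives a round trip but contents do not:
 *
 *   Cache<String, String> cache = CacheBuilder.newBuilder().maximumSize(100).build();
 *   cache.put("k", "v");
 *   // After writing `cache` to an ObjectOutputStream and reading it back:
 *   //  - the copy has the same maximumSize, expiration, and key/value strength settings
 *   //  - copy.getIfPresent("k") returns null, because entries themselves are never written out
 */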