/* Copyright (C) 2003 Vladimir Roubtsov. All rights reserved.
 *
 * This program and the accompanying materials are made available under
 * the terms of the Common Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/cpl-v10.html
 *
 * $Id: SoftValueMap.java,v 1.1.1.1 2004/05/09 16:57:55 vlad_r Exp $
 */
package com.vladium.util;

import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.util.Collection;
import java.util.Map;
import java.util.Set;

// ----------------------------------------------------------------------------
/**
 * MT-safety: an instance of this class is <I>not</I> safe for access from
 * multiple concurrent threads [even if access is done by a single thread at a
 * time]. The caller is expected to synchronize externally on an instance [the
 * implementation does not do internal synchronization for the sake of efficiency].
 * java.util.ConcurrentModificationException is not supported either.
 *
 * @author (C) 2002, Vlad Roubtsov
 */
public
final class SoftValueMap implements Map
{
    // public: ................................................................

    // TODO: for caching, does clearing of entries make sense? only to save
    // entry memory -- which does not make sense if the set of key values is not
    // growing over time... on the other hand, if the key set is unbounded,
    // entry clearing is needed so that the hash table does not get polluted with
    // empty-valued entries
    // TODO: provide mode that disables entry clearing
    // TODO: add shrinking rehashes (is it worth it?)

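    // A minimal usage sketch [illustrative only, not part of the map implementation]:
    // since instances are not internally synchronized, callers are expected to lock
    // on the map itself; 'cache' and 'createValue' below are hypothetical names used
    // only for illustration:
    //
    //     final SoftValueMap cache = new SoftValueMap (100, 100);
    //
    //     Object cachedLookup (final Object key)
    //     {
    //         synchronized (cache)
    //         {
    //             Object value = cache.get (key); // null if never mapped or if the value was cleared by the GC
    //             if (value == null)
    //             {
    //                 value = createValue (key);
    //                 cache.put (key, value);
    //             }
    //             return value;
    //         }
    //     }
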
    /**
     * Equivalent to <CODE>SoftValueMap(1, 1)</CODE>.
     */
    public SoftValueMap ()
    {
        this (1, 1);
    }

    /**
     * Equivalent to <CODE>SoftValueMap(11, 0.75F, readClearCheckFrequency, writeClearCheckFrequency)</CODE>.
     */
    public SoftValueMap (final int readClearCheckFrequency, final int writeClearCheckFrequency)
    {
        this (11, 0.75F, readClearCheckFrequency, writeClearCheckFrequency);
    }

    /**
     * Constructs a SoftValueMap with specified initial capacity, load factor,
     * and cleared value removal frequencies.
     *
     * @param initialCapacity initial number of hash buckets in the table
     * [may not be negative, 0 is equivalent to 1].
     * @param loadFactor the load factor to use to determine rehashing points
     * [must be in (0.0, 1.0] range].
     * @param readClearCheckFrequency specifies that every readClearCheckFrequency-th
     * {@link #get} should check for and remove all mappings whose soft values
     * have been cleared by the garbage collector [may not be less than 1].
     * @param writeClearCheckFrequency specifies that every writeClearCheckFrequency-th
     * {@link #put} should check for and remove all mappings whose soft values
     * have been cleared by the garbage collector [may not be less than 1].
     */
    public SoftValueMap (int initialCapacity, final float loadFactor, final int readClearCheckFrequency, final int writeClearCheckFrequency)
    {
        if (initialCapacity < 0)
            throw new IllegalArgumentException ("negative input: initialCapacity [" + initialCapacity + "]");
        if ((loadFactor <= 0.0) || (loadFactor >= 1.0 + 1.0E-6))
            throw new IllegalArgumentException ("loadFactor not in (0.0, 1.0] range: " + loadFactor);
        if (readClearCheckFrequency < 1)
            throw new IllegalArgumentException ("readClearCheckFrequency not in [1, +inf) range: " + readClearCheckFrequency);
        if (writeClearCheckFrequency < 1)
            throw new IllegalArgumentException ("writeClearCheckFrequency not in [1, +inf) range: " + writeClearCheckFrequency);

        if (initialCapacity == 0) initialCapacity = 1;

        m_valueReferenceQueue = new ReferenceQueue ();

        m_loadFactor = loadFactor;
        m_sizeThreshold = (int) (initialCapacity * loadFactor);
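        // [with the defaults (11 buckets, 0.75F load factor) this threshold is 8, i.e. the table rehashes before the 9th entry is added]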

        m_readClearCheckFrequency = readClearCheckFrequency;
        m_writeClearCheckFrequency = writeClearCheckFrequency;

        m_buckets = new SoftEntry [initialCapacity];
    }


    // unsupported operations:

    public boolean equals (final Object rhs)
    {
        throw new UnsupportedOperationException ("not implemented: equals");
    }

    public int hashCode ()
    {
        throw new UnsupportedOperationException ("not implemented: hashCode");
    }


    /**
     * Overrides Object.toString() for debug purposes.
     */
    public String toString ()
    {
        final StringBuffer s = new StringBuffer ();
        debugDump (s);

        return s.toString ();
    }


    /**
     * Returns the number of key-value mappings in this map. Some of the values
     * may have been cleared already but not removed from the table.<P>
     *
     * <B>NOTE:</B> in contrast with the java.util.WeakHashMap implementation,
     * this is a constant time operation.
     */
    public int size ()
    {
        return m_size;
    }

    /**
     * Returns 'false' if this map contains key-value mappings (even if some of
     * the values may have been cleared already but not removed from the table).<P>
     *
     * <B>NOTE:</B> in contrast with the java.util.WeakHashMap implementation,
     * this is a constant time operation.
     */
    public boolean isEmpty ()
    {
        return m_size == 0;
    }

    /**
     * Returns the value that is mapped to a given 'key'. Returns
     * null if (a) this key has never been mapped or (b) a previously mapped
     * value has been cleared by the garbage collector and removed from the table.
     *
     * @param key mapping key [may not be null].
     *
     * @return Object value mapping for 'key' [can be null].
     */
    public Object get (final Object key)
    {
        if (key == null) throw new IllegalArgumentException ("null input: key");

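        // [every m_readClearCheckFrequency-th get() polls the value reference queue and removes entries whose soft values have been cleared]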
        if ((++ m_readAccessCount % m_readClearCheckFrequency) == 0) removeClearedValues ();

        // index into the corresponding hash bucket:
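        // ['& 0x7FFFFFFF' clears the sign bit so that the modulo result is always a valid, non-negative array index]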
        final int keyHashCode = key.hashCode ();
        final SoftEntry [] buckets = m_buckets;
        final int bucketIndex = (keyHashCode & 0x7FFFFFFF) % buckets.length;

        Object result = null;

        // traverse the singly-linked list of entries in the bucket:
        for (SoftEntry entry = buckets [bucketIndex]; entry != null; entry = entry.m_next)
        {
            final Object entryKey = entry.m_key;

            if (IDENTITY_OPTIMIZATION)
            {
                // note: this uses an early identity comparison optimization, making this a bit
                // faster for table keys that do not override equals() [Thread, etc]
                if ((key == entryKey) || ((keyHashCode == entryKey.hashCode ()) && key.equals (entryKey)))
                {
                    final Reference ref = entry.m_softValue;
                    result = ref.get (); // may return null to the caller

                    // [see comment for ENQUEUE_FOUND_CLEARED_ENTRIES]
                    if (ENQUEUE_FOUND_CLEARED_ENTRIES && (result == null))
                    {
                        ref.enqueue ();
                    }

                    return result;
                }
            }
            else
            {
                if ((keyHashCode == entryKey.hashCode ()) && key.equals (entryKey))
                {
                    final Reference ref = entry.m_softValue;
                    result = ref.get (); // may return null to the caller

                    // [see comment for ENQUEUE_FOUND_CLEARED_ENTRIES]
                    if (ENQUEUE_FOUND_CLEARED_ENTRIES && (result == null))
                    {
                        ref.enqueue ();
                    }

                    return result;
                }
            }
        }

        return null;
    }

    /**
     * Updates the table to map 'key' to 'value'. Any existing mapping is overwritten.
     *
     * @param key mapping key [may not be null].
     * @param value mapping value [may not be null].
     *
     * @return Object previous value mapping for 'key' [null if no previous mapping
     * existed or its value has been cleared by the garbage collector and removed from the table].
     */
    public Object put (final Object key, final Object value)
    {
        if (key == null) throw new IllegalArgumentException ("null input: key");
        if (value == null) throw new IllegalArgumentException ("null input: value");

        if ((++ m_writeAccessCount % m_writeClearCheckFrequency) == 0) removeClearedValues ();

        SoftEntry currentKeyEntry = null;

        // detect if 'key' is already in the table [in which case, set 'currentKeyEntry' to point to its entry]:

        // index into the corresponding hash bucket:
        final int keyHashCode = key.hashCode ();
        SoftEntry [] buckets = m_buckets;
        int bucketIndex = (keyHashCode & 0x7FFFFFFF) % buckets.length;

        // traverse the singly-linked list of entries in the bucket:
        for (SoftEntry entry = buckets [bucketIndex]; entry != null; entry = entry.m_next)
        {
            final Object entryKey = entry.m_key;

            if (IDENTITY_OPTIMIZATION)
            {
                // note: this uses an early identity comparison optimization, making this a bit
                // faster for table keys that do not override equals() [Thread, etc]
                if ((key == entryKey) || ((keyHashCode == entryKey.hashCode ()) && key.equals (entryKey)))
                {
                    currentKeyEntry = entry;
                    break;
                }
            }
            else
            {
                if ((keyHashCode == entryKey.hashCode ()) && key.equals (entryKey))
                {
                    currentKeyEntry = entry;
                    break;
                }
            }
        }

        if (currentKeyEntry != null)
        {
            // replace the current value:

            final IndexedSoftReference ref = currentKeyEntry.m_softValue;
            final Object currentKeyValue = ref.get (); // can be null already [no need to work around the get() bug, though]

            if (currentKeyValue == null) ref.m_bucketIndex = -1; // disable removal by removeClearedValues() [need to do this because of the identity comparison there]
            currentKeyEntry.m_softValue = new IndexedSoftReference (value, m_valueReferenceQueue, bucketIndex);

            return currentKeyValue; // may return null to the caller
        }
        else
        {
            // add a new entry:

            if (m_size >= m_sizeThreshold) rehash ();

            // recompute the hash bucket index:
            buckets = m_buckets;
            bucketIndex = (keyHashCode & 0x7FFFFFFF) % buckets.length;
            final SoftEntry bucketListHead = buckets [bucketIndex];
            final SoftEntry newEntry = new SoftEntry (m_valueReferenceQueue, key, value, bucketListHead, bucketIndex);
            buckets [bucketIndex] = newEntry;

            ++ m_size;

            return null;
        }
    }

    public Object remove (final Object key)
    {
        if (key == null) throw new IllegalArgumentException ("null input: key");

        if ((++ m_writeAccessCount % m_writeClearCheckFrequency) == 0) removeClearedValues ();

        // index into the corresponding hash bucket:
        final int keyHashCode = key.hashCode ();
        final SoftEntry [] buckets = m_buckets;
        final int bucketIndex = (keyHashCode & 0x7FFFFFFF) % buckets.length;

        Object result = null;

        // traverse the singly-linked list of entries in the bucket:
        for (SoftEntry entry = buckets [bucketIndex], prev = null; entry != null; prev = entry, entry = entry.m_next)
        {
            final Object entryKey = entry.m_key;

            if ((IDENTITY_OPTIMIZATION && (entryKey == key)) || ((keyHashCode == entryKey.hashCode ()) && key.equals (entryKey)))
            {
                if (prev == null) // head of the list
                {
                    buckets [bucketIndex] = entry.m_next;
                }
                else
                {
                    prev.m_next = entry.m_next;
                }

                final IndexedSoftReference ref = entry.m_softValue;
                result = ref.get (); // can be null already [however, no need to work around 4485942]

                // [regardless of whether the value has been enqueued or not, disable its processing by removeClearedValues() since the entire entry is removed here]
                ref.m_bucketIndex = -1;

                // help GC:
                entry.m_softValue = null;
                entry.m_key = null;
                entry.m_next = null;
                entry = null;

                -- m_size;
                break;
            }
        }

        return result;
    }


    public void clear ()
    {
        final SoftEntry [] buckets = m_buckets;

        for (int b = 0, bLimit = buckets.length; b < bLimit; ++ b)
        {
            for (SoftEntry entry = buckets [b]; entry != null; )
            {
                final SoftEntry next = entry.m_next; // remember next pointer because we are going to reuse this entry

                // [regardless of whether the value has been enqueued or not, disable its processing by removeClearedValues()]
                entry.m_softValue.m_bucketIndex = -1;

                // help GC:
                entry.m_softValue = null;
                entry.m_next = null;
                entry.m_key = null;

                entry = next;
            }

            buckets [b] = null;
        }

        m_size = 0;
        m_readAccessCount = 0;
        m_writeAccessCount = 0;
    }


    // unsupported operations:

    public boolean containsKey (final Object key)
    {
        throw new UnsupportedOperationException ("not implemented: containsKey");
    }

    public boolean containsValue (final Object value)
    {
        throw new UnsupportedOperationException ("not implemented: containsValue");
    }

    public void putAll (final Map map)
    {
        throw new UnsupportedOperationException ("not implemented: putAll");
    }

    public Set keySet ()
    {
        throw new UnsupportedOperationException ("not implemented: keySet");
    }

    public Set entrySet ()
    {
        throw new UnsupportedOperationException ("not implemented: entrySet");
    }

    public Collection values ()
    {
        throw new UnsupportedOperationException ("not implemented: values");
    }

    // protected: .............................................................

    // package: ...............................................................


    void debugDump (final StringBuffer out)
    {
        if (out != null)
        {
            out.append (getClass ().getName ().concat ("@").concat (Integer.toHexString (System.identityHashCode (this)))); out.append (EOL);
            out.append ("size = " + m_size + ", bucket table size = " + m_buckets.length + ", load factor = " + m_loadFactor + EOL);
            out.append ("size threshold = " + m_sizeThreshold + ", get clear frequency = " + m_readClearCheckFrequency + ", put clear frequency = " + m_writeClearCheckFrequency + EOL);
            out.append ("get count: " + m_readAccessCount + ", put count: " + m_writeAccessCount + EOL);
        }
    }

    // private: ...............................................................


    /**
     * An extension of SoftReference that can store an index of the bucket it
     * is associated with.
     */
    static class IndexedSoftReference extends SoftReference
    {
        IndexedSoftReference (final Object referent, ReferenceQueue queue, final int bucketIndex)
        {
            super (referent, queue);

            m_bucketIndex = bucketIndex;
        }

        int m_bucketIndex;

    } // end of nested class


    /**
     * The structure used for chaining colliding keys.
     */
    static class SoftEntry
    {
        SoftEntry (final ReferenceQueue valueReferenceQueue, final Object key, Object value, final SoftEntry next, final int bucketIndex)
        {
            m_key = key;

            m_softValue = new IndexedSoftReference (value, valueReferenceQueue, bucketIndex); // ... do not retain a strong reference to the value
            value = null;

            m_next = next;
        }

        IndexedSoftReference m_softValue; // soft reference to the value [never null]
        Object m_key;  // strong reference to the key [never null]

        SoftEntry m_next; // singly-linked list link

    } // end of nested class


    /**
     * Re-hashes the table into a new array of buckets. During the process
     * cleared value entries are discarded, making for another efficient cleared
     * value removal method.
     */
    private void rehash ()
    {
        // TODO: it is possible to run this method twice, first time using the 2*k+1 prime sequencer for newBucketCount
        // and then with that value reduced to actually shrink capacity. As it is right now, the bucket table can
        // only grow in size

        final SoftEntry [] buckets = m_buckets;

        final int newBucketCount = (m_buckets.length << 1) + 1;
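        // [e.g., the default capacity of 11 buckets grows as 11 -> 23 -> 47 -> 95 -> ...]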
        final SoftEntry [] newBuckets = new SoftEntry [newBucketCount];

        int newSize = 0;

        // rehash all entry chains in every bucket:
        for (int b = 0, bLimit = buckets.length; b < bLimit; ++ b)
        {
            for (SoftEntry entry = buckets [b]; entry != null; )
            {
                final SoftEntry next = entry.m_next; // remember next pointer because we are going to reuse this entry

                IndexedSoftReference ref = entry.m_softValue; // get the soft value reference

                Object entryValue = ref.get (); // convert the soft reference to a local strong one

                // skip entries whose values have been cleared: this also saves on future removeClearedValues() work
                if (entryValue != null)
                {
                    // [assertion: 'softValue' couldn't have been enqueued already and can't be enqueued until strong reference in 'entryValue' is nulled out]

                    // index into the corresponding new hash bucket:
                    final int entryKeyHashCode = entry.m_key.hashCode ();
                    final int newBucketIndex = (entryKeyHashCode & 0x7FFFFFFF) % newBucketCount;

                    final SoftEntry bucketListHead = newBuckets [newBucketIndex];
                    entry.m_next = bucketListHead;
                    newBuckets [newBucketIndex] = entry;

                    // adjust bucket index:
                    ref.m_bucketIndex = newBucketIndex;

                    ++ newSize;

                    entryValue = null;
                }
                else
                {
                    // ['softValue' may or may not have been enqueued already]

                    // adjust bucket index:
                    // [regardless of whether 'softValue' has been enqueued or not, disable its removal by removeClearedValues() since the buckets get restructured]
                    ref.m_bucketIndex = -1;
                }

                entry = next;
            }
        }

        if (DEBUG)
        {
            if (m_size > newSize) System.out.println ("DEBUG: rehash() cleared " + (m_size - newSize) + " values, new size = " + newSize);
        }

        m_size = newSize;
        m_sizeThreshold = (int) (newBucketCount * m_loadFactor);
        m_buckets = newBuckets;
    }


    /**
     * Removes all entries whose soft values have been cleared _and_ enqueued.
     * See comments below for why this is safe with respect to rehash().
     */
    private void removeClearedValues ()
    {
        int count = 0;

next:   for (Reference _ref; (_ref = m_valueReferenceQueue.poll ()) != null; )
        {
            // remove entry containing '_ref' using its bucket index and identity comparison:

            // index into the corresponding hash bucket:
            final int bucketIndex = ((IndexedSoftReference) _ref).m_bucketIndex;

            if (bucketIndex >= 0) // skip keys that were already removed by rehash()
            {
                // [assertion: this reference was not cleared when the last rehash() ran and so its m_bucketIndex is correct]

                // traverse the singly-linked list of entries in the bucket:
                for (SoftEntry entry = m_buckets [bucketIndex], prev = null; entry != null; prev = entry, entry = entry.m_next)
                {
                    if (entry.m_softValue == _ref)
                    {
                        if (prev == null) // head of the list
                        {
                            m_buckets [bucketIndex] = entry.m_next;
                        }
                        else
                        {
                            prev.m_next = entry.m_next;
                        }

                        entry.m_softValue = null;
                        entry.m_key = null;
                        entry.m_next = null;
                        entry = null;

                        -- m_size;

                        if (DEBUG) ++ count;
                        continue next;
                    }
                }

                // no match found: this can happen if a soft value got replaced by a put

                final StringBuffer msg = new StringBuffer ("removeClearedValues(): soft reference [" + _ref + "] did not match within bucket #" + bucketIndex + EOL);
                debugDump (msg);

                throw new Error (msg.toString ());
            }
            // else: it has already been removed by rehash() or other methods
        }

        if (DEBUG)
        {
            if (count > 0) System.out.println ("DEBUG: removeClearedValues() cleared " + count + " keys, new size = " + m_size);
        }
    }


    private final ReferenceQueue m_valueReferenceQueue; // reference queue for all references used by SoftEntry objects used by this table
    private final float m_loadFactor; // determines the setting of m_sizeThreshold
    private final int m_readClearCheckFrequency, m_writeClearCheckFrequency; // parameters determining frequency of running removeClearedValues() by get() and put()/remove(), respectively

    private SoftEntry [] m_buckets; // table of buckets
    private int m_size; // number of values in the table, not cleared as of last check
    private int m_sizeThreshold; // size threshold for rehashing
    private int m_readAccessCount, m_writeAccessCount;

    private static final String EOL = System.getProperty ("line.separator", "\n");

    private static final boolean IDENTITY_OPTIMIZATION          = true;

    // setting this to 'true' is an optimization and a workaround for bug 4485942:
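    // [when enabled, get() proactively enqueues a soft reference it finds already cleared, so that the next removeClearedValues() pass can drop the stale entry]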
    private static final boolean ENQUEUE_FOUND_CLEARED_ENTRIES  = true;

    private static final boolean DEBUG = false;

} // end of class
// ----------------------------------------------------------------------------