LRU Cache 2

 
/*
 * Copyright (c) 2008-2011 Simon Ritchie.
 * All rights reserved. 
 * 
 * This program is free software: you can redistribute it and/or modify 
 * it under the terms of the GNU Lesser General Public License as published 
 * by the Free Software Foundation, either version 3 of the License, or 
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful, but 
 * WITHOUT ANY WARRANTY; without even the implied warranty of 
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
 * See the GNU Lesser General Public License for more details.
 * 
 * You should have received a copy of the GNU Lesser General Public License 
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
//package org.rimudb.util;

import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

public class LRUCache2<K, V> {
  private static final float hashTableLoadFactor = 0.75f;

  private final LinkedHashMap<K, V> map;
  private final int cacheSize;

  /**
   * Creates a new LRU cache.
   * 
   * @param cacheSize The maximum number of entries that will be kept in this cache.
   */
  public LRUCache2(int cacheSize) {
    this.cacheSize = cacheSize;
    // Size the backing hash table so that cacheSize entries fit without a rehash.
    int hashTableCapacity = (int) Math.ceil(cacheSize / hashTableLoadFactor) + 1;
    
    map = new LinkedHashMap<K, V>(hashTableCapacity, hashTableLoadFactor, true) {
      private static final long serialVersionUID = 1;

      @Override
      protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        return size() > LRUCache2.this.cacheSize;
      }
    };
  }

  /**
   * Retrieves an entry from the cache.<br>
   * The retrieved entry is marked as the most recently used,
   * so it will be the last to be evicted.
   * 
   * @param key
   * @return The value associated with this key, or <code>null</code> if the key is not in the cache
   */
  public synchronized V get(K key) {
    return map.get(key);
  }

  /**
   * Adds an entry to this cache. If the cache is full, the LRU (least
   * recently used) entry is dropped.
   * 
   * @param key
   * @param value
   */
  public synchronized void put(K key, V value) {
    map.put(key, value);
  }

  /**
   * Removes an entry from the cache.
   * 
   * @param key
   */
  public synchronized void remove(K key) {
    map.remove(key);
  }

  /**
   * Clears the cache.
   */
  public synchronized void clear() {
    map.clear();
  }

  /**
   * Returns the number of used entries in the cache.
   * 
   * @return the number of entries currently in the cache.
   */
  public synchronized int usedEntries() {
    return map.size();
  }

  /**
   * Returns a <code>Collection</code> that contains a copy of all cache
   * entries.
   * 
   * @return a <code>Collection</code> with a copy of the cache content.
   */
  public synchronized Collection<Map.Entry<K, V>> getAll() {
    return new ArrayList<Map.Entry<K, V>>(map.entrySet());
  }

  /**
   * Returns the keys currently in the cache, ordered from least recently
   * used to most recently used. Note that this is a view backed by the
   * underlying map, not a copy.
   * 
   * @return the cache's key set
   */
  public synchronized Set<K> keySet() {
    return map.keySet();
  }

  /**
   * Returns the maximum number of entries this cache can hold.
   * 
   * @return the configured cache size
   */
  public int getMaximumSize() {
    return cacheSize;
  }
}
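
The eviction policy lives entirely in the LinkedHashMap built in the constructor: the third constructor argument (true) switches the map to access order, so every get() moves the touched entry to the most recently used position, and the removeEldestEntry() override drops the stalest entry whenever the size exceeds cacheSize. A minimal usage sketch (key and value names here are purely illustrative):

LRUCache2<String, String> cache = new LRUCache2<String, String>(3);
cache.put("a", "1");
cache.put("b", "2");
cache.put("c", "3");
cache.get("a");        // touching "a" makes it the most recently used entry
cache.put("d", "4");   // the cache is full, so the least recently used entry ("b") is evicted
System.out.println(cache.keySet());  // prints [c, a, d]
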
----------------
/*
 * Copyright (c) 2008-2011 Simon Ritchie.
 * All rights reserved. 
 * 
 * This program is free software: you can redistribute it and/or modify 
 * it under the terms of the GNU Lesser General Public License as published 
 * by the Free Software Foundation, either version 3 of the License, or 
 * (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful, but 
 * WITHOUT ANY WARRANTY; without even the implied warranty of 
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
 * See the GNU Lesser General Public License for more details.
 * 
 * You should have received a copy of the GNU Lesser General Public License 
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package org.rimudb.util;

import java.util.*;
import java.util.Map.*;

import junit.framework.*;

public class LRUCache2Tests extends TestCase {
  private LRUCache2<String, String> cache = null;

  public LRUCache2Tests(String name) {
    super(name);
  }

  protected void setUp() throws Exception {
    super.setUp();
    cache = new LRUCache2<String, String>(5);
  }

  protected void tearDown() throws Exception {
    super.tearDown();
  }

  public void testGet() {
    cache.put("key1", "value1");
    cache.put("key2", "value2");
    cache.put("key3", "value3");
    cache.put("key4", "value4");
    cache.put("key5", "value5");
    cache.put("key6", "value6");
    
    assertNull(cache.get("key1"));
    assertEquals("value2", cache.get("key2"));
    assertEquals("value3", cache.get("key3"));
    assertEquals("value4", cache.get("key4"));
    assertEquals("value5", cache.get("key5"));
    assertEquals("value6", cache.get("key6"));
  }

  public void testRemove() {
    cache.put("key1", "value1");
    cache.put("key2", "value2");
    cache.put("key3", "value3");
    cache.put("key4", "value4");
    cache.put("key5", "value5");
    
    assertEquals(5, cache.usedEntries());
    cache.remove("key3");
    assertEquals(4, cache.usedEntries());
  }

  public void testClear() {
    cache.put("key1", "value1");
    cache.put("key2", "value2");
    cache.put("key3", "value3");
    cache.put("key4", "value4");
    cache.put("key5", "value5");
    
    assertEquals(5, cache.usedEntries());
    cache.clear();
    assertEquals(0, cache.usedEntries());
  }

  public void testUsedEntries() {
    cache.put("key1", "value1");
    cache.put("key2", "value2");
    assertEquals(2, cache.usedEntries());
    cache.put("key3", "value3");
    assertEquals(3, cache.usedEntries());
    cache.put("key4", "value4");
    cache.put("key5", "value5");
    assertEquals(5, cache.usedEntries());
    cache.put("key6", "value6");
    assertEquals(5, cache.usedEntries());
    cache.put("key7", "value7");
    assertEquals(5, cache.usedEntries());
  }

  public void testGetAll() {
    cache.put("key1", "value1");
    cache.put("key2", "value2");
    cache.put("key3", "value3");
    cache.put("key4", "value4");
    cache.put("key5", "value5");
    Collection<Entry<String, String>> cacheCopy = cache.getAll();
    assertEquals(5, cacheCopy.size());
  }

  public void testKeySet() {
    cache.put("key1", "value1");
    cache.put("key2", "value2");
    cache.put("key3", "value3");
    cache.put("key4", "value4");
    cache.put("key5", "value5");
    Set<String> keyset = cache.keySet();
    assertTrue(keyset.contains("key1"));
    assertTrue(keyset.contains("key2"));
    assertTrue(keyset.contains("key3"));
    assertTrue(keyset.contains("key4"));
    assertTrue(keyset.contains("key5"));
  }

  public void testGetMaximumSize() {
    assertEquals(5, cache.getMaximumSize());
    cache.put("key1", "value1");
    cache.put("key2", "value2");
    cache.put("key3", "value3");
    cache.put("key4", "value4");
    cache.put("key5", "value5");
    assertEquals(5, cache.getMaximumSize());
  }
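
  // Illustrative extra test (not in the original suite): verifies that a get()
  // refreshes an entry's recency, so the eviction victim changes accordingly.
  public void testGetRefreshesRecency() {
    cache.put("key1", "value1");
    cache.put("key2", "value2");
    cache.put("key3", "value3");
    cache.put("key4", "value4");
    cache.put("key5", "value5");

    // Touch key1 so that key2 becomes the least recently used entry.
    cache.get("key1");

    // Adding a sixth entry should now evict key2 instead of key1.
    cache.put("key6", "value6");
    assertNull(cache.get("key2"));
    assertEquals("value1", cache.get("key1"));
    assertEquals("value6", cache.get("key6"));
  }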

}

Related examples in the same category

1.A LRU (Least Recently Used) cache replacement policy
2.A Map that is size-limited using an LRU algorithm
3.A random cache replacement policy
4.A second chance FIFO (First In First Out) cache replacement policy
5.An LRU (Least Recently Used) cache replacement policy
6.Async LRU List
7.FIFO First In First Out cache replacement policy
8.Implementation of a Least Recently Used cache policy
9.Generic LRU Cache
10.LRU Cache
11.A Least Recently Used Cache
12.The class that implements a simple LRU cache
13.Map implementation for cache usage
14.Weak Cache Map
15.Provider for the application cache directories.
16.Fixed length cache with a LRU replacement policy.
17.A small LRU object cache.
18.A least recently used (LRU) cache.
19.A cache that purges values according to their frequency and recency of use and other qualitative values.
20.A thread-safe cache that keeps its values as java.lang.ref.SoftReference so that the cache is, in effect, managed by the JVM and kept as small as is required
21.Cache LRU
22.A FastCache is a map implemented with soft references, optimistic copy-on-write updates, and approximate count-based pruning.
23.A HardFastCache is a map implemented with hard references, optimistic copy-on-write updates, and approximate count-based pruning.