Example usage for com.google.common.cache RemovalNotification getValue

List of usage examples for com.google.common.cache RemovalNotification getValue

Introduction

On this page you can find example usages of com.google.common.cache RemovalNotification#getValue.

Prototype

@Nullable
    @Override
    public V getValue() 

Source Link

Usage

From source file:org.apache.gobblin.hive.HiveMetastoreClientPool.java

/**
 * Builds the cache of {@link HiveMetastoreClientPool}s keyed by optional metastore URI.
 * Entries expire after a configurable idle period; the removal listener closes each
 * evicted pool so its underlying clients are released.
 */
private static final Cache<Optional<String>, HiveMetastoreClientPool> createPoolCache(
        final Properties properties) {
    // TTL (minutes) comes from the supplied properties when present, else the default.
    final long ttlMinutes = properties.containsKey(POOL_CACHE_TTL_MINUTES_KEY)
            ? Long.parseLong(properties.getProperty(POOL_CACHE_TTL_MINUTES_KEY))
            : DEFAULT_POOL_CACHE_TTL_MINUTES;
    final RemovalListener<Optional<String>, HiveMetastoreClientPool> closeOnRemoval =
            new RemovalListener<Optional<String>, HiveMetastoreClientPool>() {
                @Override
                public void onRemoval(
                        RemovalNotification<Optional<String>, HiveMetastoreClientPool> notification) {
                    HiveMetastoreClientPool pool = notification.getValue();
                    // getValue() is @Nullable; only close pools that are still present.
                    if (pool != null) {
                        pool.close();
                    }
                }
            };
    return CacheBuilder.newBuilder()
            .expireAfterAccess(ttlMinutes, TimeUnit.MINUTES)
            .removalListener(closeOnRemoval)
            .build();
}

From source file:com.nirmata.workflow.details.Scheduler.java

private static void remover(RemovalNotification<TaskType, Queue> notification) {
    CloseableUtils.closeQuietly(notification.getValue());
}

From source file:com.qubole.rubix.bookkeeper.BookKeeper.java

/**
 * Initializes the shared file-metadata cache, sized from the usable space across all
 * configured cache disks (converted to MB so Guava weights don't overflow int).
 *
 * Fixes relative to the previous version:
 * <ul>
 *   <li>The SIZE-eviction "still under quota" check compared {@code free} in BYTES
 *       against {@code total} in MB, and the expression
 *       {@code total * 1.0 * (100.0 - pct / 100)} mis-parenthesized the percentage
 *       (yielding roughly {@code total * 100} instead of the reserved fraction
 *       {@code total * (100 - pct) / 100}). Both are corrected below.</li>
 *   <li>{@code RemovalNotification.getValue()} is {@code @Nullable}; the listener
 *       now guards against a null value before dereferencing it.</li>
 * </ul>
 */
private static synchronized void initializeCache(final Configuration conf) {
    long avail = 0;
    for (int d = 0; d < CacheConfig.numDisks(conf); d++) {
        avail += new File(CacheConfig.getDirPath(conf, d)).getUsableSpace();
    }
    avail = avail / 1024 / 1024; // bytes -> MB
    final long total = avail;
    log.info("total free space " + avail + "MB");
    fileMetadataCache = CacheBuilder.newBuilder().weigher(new Weigher<String, FileMetadata>() {
        @Override
        public int weigh(String key, FileMetadata md) {
            // weights are in MB to avoid overflowing due to large files
            // This is not accurate, we are placing weight as whole filesize
            // Rather it should be dynamic and should be equal to size of file data cached
            // But guava needs weight fixed at init
            // TODO: find a way to set weight accurately and get away from current workaround
            int weight = (int) (md.getOccupiedSize() / 1024 / 1024);
            log.info("weighing key " + key + " as " + weight);
            return weight;
        }
    }).maximumWeight((long) (avail * 1.0 * CacheConfig.getCacheDataFullnessPercentage(conf) / 100.0))
            .expireAfterWrite(CacheConfig.getCacheDataExpirationAfterWrite(conf), TimeUnit.SECONDS)
            .removalListener(new RemovalListener<String, FileMetadata>() {
                @Override
                public void onRemoval(RemovalNotification<String, FileMetadata> notification) {
                    FileMetadata md = notification.getValue();
                    if (md == null) {
                        // Value already reclaimed; nothing to close or clean up.
                        return;
                    }
                    try {
                        if (notification.getCause() == RemovalCause.EXPIRED) {
                            // This is to workaround the static weighing of Guava Cache, logic goes like this:
                            // We evict aggressively but do not delete backing data unless running out of space
                            // On next get() on cache, fileMetadata.getOccupiedSize will return size occupied on disk
                            md.close();
                            log.info("Evicting " + md.getRemotePath().toString() + " due to "
                                    + notification.getCause());
                            return;
                        }

                        if (notification.getCause() == RemovalCause.SIZE) {
                            // Here also we wont delete unless very close to disk full
                            long free = 0;
                            for (int d = 0; d < CacheConfig.numDisks(conf); d++) {
                                free += new File(CacheConfig.getDirPath(conf, d)).getUsableSpace();
                            }
                            free = free / 1024 / 1024; // bytes -> MB, same unit as `total`
                            // Keep the backing file while free space still exceeds the
                            // reserved (non-cache) share of the initial total, in MB.
                            if (free > total * (100.0 - CacheConfig.getCacheDataFullnessPercentage(conf))
                                    / 100.0) {
                                // still havent utilized the allowed space so do not delete the backing file
                                md.close();
                                log.warn("Evicting " + md.getRemotePath().toString() + " due to "
                                        + notification.getCause());
                                return;
                            }
                        }
                        //if file has been modified in cloud, its entry will be deleted due to "EXPLICIT"
                        log.warn("deleting entry for" + md.getRemotePath().toString() + " due to "
                                + notification.getCause());
                        md.closeAndCleanup();
                    } catch (IOException e) {
                        throw Throwables.propagate(e);
                    }
                }
            }).build();
}

From source file:ca.mcgill.networkdynamics.geoinference.CreateDataset.java

/**
 * Splits the gzipped input post files into per-user tmp files under {@code tmpDir},
 * then sorts and merges them into {@code datasetDir/users.json.gz}, one line per
 * user of the form {"user_id": ..., "posts": [...]} with posts ordered by post id.
 *
 * NOTE(review): this method always returns null despite declaring List<File>;
 * confirm callers do not rely on the return value.
 *
 * @param inputFiles gzipped files containing one JSON post object per line
 * @param tmpDir     scratch directory for intermediate per-user tmp files
 * @param datasetDir output directory that receives users.json.gz
 * @throws IOException on failure reading tmp files or writing the merged output
 */
private static List<File> createUsersFile(List<File> inputFiles, File tmpDir, File datasetDir)
        throws IOException {
    // Maps each user id to the tmp file accumulating that user's posts.
    TLongObjectMap<File> userToFile = new TLongObjectHashMap<File>();

    AtomicInteger curWriterCount = new AtomicInteger();
    AtomicInteger numUsersInCurWriter = new AtomicInteger();
    AtomicReference<File> curWriter = new AtomicReference<File>();

    AtomicLong postsSeen = new AtomicLong();

    AtomicInteger filesProcessed = new AtomicInteger();
    final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();

    // Bounded pool of open gzip writers; eviction closes the PrintWriter so we never
    // exceed MAX_OPEN_FILES file handles. Evicted files are reopened in append mode.
    LoadingCache<File, PrintWriter> fileWriters = CacheBuilder.newBuilder().maximumSize(MAX_OPEN_FILES)
            .removalListener(new RemovalListener<File, PrintWriter>() {
                public void onRemoval(RemovalNotification<File, PrintWriter> notification) {
                    notification.getValue().close();
                }
            }).build(new CacheLoader<File, PrintWriter>() {
                public PrintWriter load(File key) throws Exception {
                    return openGzAppend(key);
                }
            });

    inputFiles.parallelStream().forEach(f -> {
        try {
            BufferedReader br = openGz(f);
            for (String line = null; (line = br.readLine()) != null;) {
                JSONObject post = new JSONObject(line);
                if (!post.has("user"))
                    continue;
                JSONObject user = post.getJSONObject("user");
                long uid = user.getLong("id");
                File tmpFile = null;
                rwl.readLock().lock();
                tmpFile = userToFile.get(uid);
                rwl.readLock().unlock();

                if (tmpFile == null) {
                    // double lock check
                    // NOTE(review): this re-check still runs under the READ lock, and the
                    // write-locked branch below never re-checks userToFile after acquiring
                    // the write lock. Two threads can therefore both observe null and both
                    // enter the write section in turn: the second put() reassigns the uid
                    // and numUsersInCurWriter counts the user twice. Confirm whether this
                    // race is acceptable, or recheck under the write lock.
                    rwl.readLock().lock();
                    tmpFile = userToFile.get(uid);
                    rwl.readLock().unlock();

                    if (tmpFile == null) {
                        rwl.writeLock().lock();
                        if (curWriter.get() == null || numUsersInCurWriter.get() == MAX_USERS_PER_TMP_FILE) {

                            // Roll over to a fresh tmp file once the current one has
                            // accumulated MAX_USERS_PER_TMP_FILE distinct users.
                            tmpFile = new File(tmpDir, "users." + curWriterCount + ".json.tmp.gz");
                            curWriter.set(tmpFile);
                            curWriterCount.incrementAndGet();
                            numUsersInCurWriter.set(0);
                        }

                        tmpFile = curWriter.get();
                        userToFile.put(uid, tmpFile);
                        numUsersInCurWriter.incrementAndGet();
                        fileWriters.get(tmpFile).println(line);
                        rwl.writeLock().unlock();
                    } else {
                        fileWriters.get(tmpFile).println(line);
                    }
                }
                // If the user already has a writer, print its line to this file
                else {
                    fileWriters.get(tmpFile).println(line);
                }

                long ps = postsSeen.incrementAndGet();
                if (ps % 500_000 == 0) {
                    System.out.printf("Processed %d posts from %d/%d " + "input files into %d tmp files%n", ps,
                            filesProcessed.get(), inputFiles.size(), curWriterCount.get());
                }
            }
            br.close();
            filesProcessed.incrementAndGet();
        } catch (JSONException je) {
            je.printStackTrace();
        } catch (ExecutionException ee) {
            ee.printStackTrace();
        } catch (IOException ie) {
            ie.printStackTrace();
        }
    });

    // NOTE(review): unlike the in-progress message above, this one reports
    // curWriterCount.get() + 1 — one of the two is off by one; confirm which.
    System.out.printf("FINISHED processing %d posts from %d/%d " + "input files into %d tmp files%n",
            postsSeen.get(), filesProcessed.get(), inputFiles.size(), curWriterCount.get() + 1);

    totalNumPosts = postsSeen.get();

    // Finish writing
    for (PrintWriter pw : fileWriters.asMap().values())
        pw.close();

    File usersFile = new File(datasetDir, "users.json.gz");
    PrintWriter usersPw_ = null;
    try {
        usersPw_ = openGzWrite(usersFile);
    } catch (IOException ioe) {
        throw new IOError(ioe);
    }
    // Effectively-final alias so the lambda below may capture the writer.
    PrintWriter usersPw = usersPw_;

    int numFilesProcessed = 0;
    AtomicLong numPostsRecorded = new AtomicLong();

    Set<File> tmpFiles = new HashSet<File>(userToFile.valueCollection());

    // Sort each file to organize its users
    for (File tmpFile : tmpFiles) {

        // TODO(?): check that the file isn't too big to process in memory
        if (isTooBigToProcessInMemory(tmpFile)) {
            throw new UnsupportedOperationException("please file a ticket in github");
        }

        List<JSONObject> sortedPosts = new ArrayList<JSONObject>(1_000_000);
        BufferedReader br = openGz(tmpFile);
        for (String line = null; (line = br.readLine()) != null;) {
            try {
                sortedPosts.add(new JSONObject(line));
            } catch (JSONException je) {
                je.printStackTrace();
            }
        }
        br.close();

        // Clean up our mess, since this file is no longer needed
        tmpFile.delete();

        // Order by user id, breaking ties by post id, so each user's posts are
        // contiguous and chronological-by-id in the grouped output below.
        Collections.sort(sortedPosts, new Comparator<JSONObject>() {
            public int compare(JSONObject jo1, JSONObject jo2) {
                try {
                    long uid1 = jo1.getJSONObject("user").getLong("id");
                    long uid2 = jo2.getJSONObject("user").getLong("id");
                    return (uid1 == uid2) ? Long.compare(jo1.getLong("id"), jo2.getLong("id"))
                            : Long.compare(uid1, uid2);
                } catch (JSONException je) {
                    throw new IllegalStateException(je);
                }
            }
        });

        // Group this file's posts by user and emit one combined JSON line per user;
        // output is serialized on usersPw since PrintWriter calls come from many threads.
        sortedPosts.parallelStream().collect(Collectors.groupingBy(post -> getUid(post))).entrySet()
                .forEach(e -> {
                    long uid = e.getKey();
                    List<JSONObject> posts = e.getValue();
                    try {
                        JSONObject combined = new JSONObject();
                        combined.put("user_id", uid);
                        JSONArray postsArr = new JSONArray();
                        for (JSONObject post : posts)
                            postsArr.put(post);
                        combined.put("posts", postsArr);
                        synchronized (usersPw) {
                            usersPw.println(combined);
                        }

                        numPostsRecorded.addAndGet(posts.size());
                    } catch (JSONException je) {
                        throw new IllegalStateException(je);
                    }
                });

        ++numFilesProcessed;

        System.out.printf("Processed tmp file %d/%d, recorded %d posts%n", numFilesProcessed, tmpFiles.size(),
                numPostsRecorded.get());
    }

    usersPw.close();
    System.out.println("Done creating users file");
    return null;
}

From source file:com.addthis.tutor.tree.TreeRemovalListener.java

/**
 * Deletes the tutor state's working directory when its cache entry is removed.
 */
@Override
public void onRemoval(RemovalNotification<String, TreeTutorState> notification) {
    TreeTutorState state = notification.getValue();
    // RemovalNotification.getValue() is declared @Nullable (the entry's value may
    // already have been reclaimed), so guard before dereferencing.
    if (state == null) {
        return;
    }
    File dir = state.getDir();
    LessFiles.deleteDir(dir);
}

From source file:org.objectfabric.GoogleCache.java

// NOTE(review): BUG — removalListener(...) is configured AFTER builder.build() has
// already produced the map backing this instance, so the listener is never attached
// to that cache and onEviction(...) will never fire for its evictions. Per the
// CacheBuilder contract the listener must be set before build(). It cannot be fixed
// in place here: super(...) must be the first statement, and the anonymous listener
// captures the enclosing instance, which may not be referenced before the super
// constructor completes. Fixing this likely requires a static factory or an
// indirection the listener can consult after construction.
@SuppressWarnings("unchecked")
public GoogleCache(CacheBuilder builder) {
    super(true, new GoogleCacheBackend(builder.build().asMap()));

    builder.removalListener(new RemovalListener() {

        @Override
        public void onRemoval(RemovalNotification notification) {
            onEviction(notification.getValue());
        }
    });
}

From source file:com.addthis.hydra.data.query.engine.EngineRemovalListener.java

/**
 * Closes an engine evicted from the cache, unless this notification is a spurious
 * event for a value that is still the cache's current entry.
 */
@Override
public void onRemoval(RemovalNotification<String, QueryEngine> notification) {
    final QueryEngine evicted = notification.getValue();
    final QueryEngine stillCached = engineCache.loadingEngineCache.asMap().get(notification.getKey());
    // a refresh call that returns the current value can generate spurious events
    if (stillCached == evicted) {
        return;
    }
    assert evicted != null; //we only use strong references
    try {
        evicted.closeWhenIdle();
    } catch (Throwable t) {
        log.error("Generic Error while closing Engine", t);
    }
    if (stillCached == null) {
        directoriesEvicted.mark();
    }
}

From source file:de.metas.ui.web.view.DefaultViewsRepositoryStorage.java

/**
 * Cache removal hook: closes the evicted view, translating the notification's
 * eviction flag into the appropriate {@link ViewCloseReason}.
 */
private final void onViewRemoved(final RemovalNotification<Object, Object> notification) {
    final ViewCloseReason closeReason = ViewCloseReason.fromCacheEvictedFlag(notification.wasEvicted());
    final IView view = (IView) notification.getValue();
    view.close(closeReason);
}

From source file:org.trustedanalytics.atk.graphbuilder.titan.cache.TitanGraphCache.java

/**
 * Shut down a Titan graph when it is evicted from the cache
 *//*from ww  w. jav a 2 s .  c o  m*/
/**
 * Shut down a Titan graph when it is evicted from the cache.
 */
private RemovalListener<Configuration, TitanGraph> createRemovalListener() {
    return new RemovalListener<Configuration, TitanGraph>() {
        @Override
        public void onRemoval(RemovalNotification<Configuration, TitanGraph> removal) {
            TitanGraph evictedGraph = removal.getValue();
            if (evictedGraph == null) {
                // getValue() is @Nullable; nothing to shut down.
                return;
            }
            LOG.info("Evicting a standard Titan graph from the cache: " + cache.stats());
            evictedGraph.shutdown();
        }
    };
}

From source file:com.torodb.mongowp.client.core.GuavaCachedMongoClientFactory.java

/**
 * Closes the underlying Mongo client of a cached entry when it is removed.
 */
private void onRemoval(RemovalNotification<HostAndPort, CachedMongoClient> notification) {
    CachedMongoClient client = notification.getValue();
    // RemovalNotification.getValue() is declared @Nullable (the value may already
    // have been reclaimed), so guard before dereferencing.
    if (client != null) {
        client.delegate.close();
    }
}