Example usage for org.apache.lucene.store Directory sync

List of usage examples for org.apache.lucene.store Directory sync

Introduction

On this page you can find example usages of org.apache.lucene.store Directory.sync.

Prototype

public abstract void sync(Collection<String> names) throws IOException;

Source Link

Document

Ensures that any writes to these files are moved to stable storage (made durable).

Usage

From source file:com.bah.lucene.BaseDirectoryTestSuite.java

License:Apache License

/**
 * Wraps the {@code test} directory so that every mutating operation is
 * mirrored to the {@code control} directory, while read-side metadata
 * (file pointers, lengths) is cross-checked between the two. Mismatches
 * are reported on {@code System.err} instead of throwing, so a single
 * run surfaces every divergence at once.
 *
 * @param control the known-good reference directory
 * @param test    the directory implementation under test
 * @return a Directory delegating to both implementations
 */
private Directory getControlDir(final Directory control, final Directory test) {
    return new Directory() {

        // Locking is delegated exclusively to the control directory.
        @Override
        public Lock makeLock(String name) {
            return control.makeLock(name);
        }

        @Override
        public void clearLock(String name) throws IOException {
            control.clearLock(name);
        }

        @Override
        public void setLockFactory(LockFactory lockFactory) throws IOException {
            control.setLockFactory(lockFactory);
        }

        @Override
        public LockFactory getLockFactory() {
            return control.getLockFactory();
        }

        @Override
        public String getLockID() {
            return control.getLockID();
        }

        @Override
        public void copy(Directory to, String src, String dest, IOContext context) throws IOException {
            control.copy(to, src, dest, context);
        }

        @Override
        public IndexInputSlicer createSlicer(String name, IOContext context) throws IOException {
            return control.createSlicer(name, context);
        }

        /**
         * Returns an IndexOutput that writes every byte to both directories
         * and compares file pointers / lengths on the query methods.
         */
        @Override
        public IndexOutput createOutput(final String name, IOContext context) throws IOException {
            final IndexOutput testOutput = test.createOutput(name, context);
            final IndexOutput controlOutput = control.createOutput(name, context);
            return new IndexOutput() {

                @Override
                public void flush() throws IOException {
                    testOutput.flush();
                    controlOutput.flush();
                }

                @Override
                public void close() throws IOException {
                    testOutput.close();
                    controlOutput.close();
                }

                @Override
                public long getFilePointer() {
                    long filePointer = testOutput.getFilePointer();
                    long controlFilePointer = controlOutput.getFilePointer();
                    if (controlFilePointer != filePointer) {
                        System.err.println("Output Name [" + name + "] with filePointer [" + filePointer
                                + "] and control filePointer [" + controlFilePointer + "] does not match");
                    }
                    return filePointer;
                }

                @SuppressWarnings("deprecation")
                @Override
                public void seek(long pos) throws IOException {
                    testOutput.seek(pos);
                    controlOutput.seek(pos);
                }

                @Override
                public long length() throws IOException {
                    long length = testOutput.length();
                    long controlLength = controlOutput.length();
                    if (controlLength != length) {
                        System.err.println("Output Name [" + name + "] with length [" + length
                                + "] and control length [" + controlLength + "] does not match");
                    }
                    return length;
                }

                @Override
                public void writeByte(byte b) throws IOException {
                    testOutput.writeByte(b);
                    controlOutput.writeByte(b);
                }

                @Override
                public void writeBytes(byte[] b, int offset, int length) throws IOException {
                    testOutput.writeBytes(b, offset, length);
                    controlOutput.writeBytes(b, offset, length);
                }

            };
        }

        @Override
        public IndexInput openInput(final String name, IOContext context) throws IOException {
            final IndexInput testInput = test.openInput(name, context);
            final IndexInput controlInput = control.openInput(name, context);
            return new IndexInputCompare(name, testInput, controlInput);
        }

        // Listing/existence checks consult only the directory under test.
        @Override
        public String[] listAll() throws IOException {
            return test.listAll();
        }

        @Override
        public boolean fileExists(String name) throws IOException {
            return test.fileExists(name);
        }

        @Override
        public void deleteFile(String name) throws IOException {
            test.deleteFile(name);
            control.deleteFile(name);
        }

        @Override
        public long fileLength(String name) throws IOException {
            long fileLength = test.fileLength(name);
            long controlFileLength = control.fileLength(name);
            if (controlFileLength != fileLength) {
                System.err.println("Input Name [" + name + "] with length [" + fileLength
                        + "] and control length [" + controlFileLength + "] does not match");
            }
            return fileLength;
        }

        @Override
        public void sync(Collection<String> names) throws IOException {
            test.sync(names);
            // BUGFIX: previously test.sync(names) was invoked twice and the
            // control directory was never synced, breaking the mirroring
            // contract this wrapper exists for.
            control.sync(names);
        }

        @Override
        public void close() throws IOException {
            test.close();
            control.close();
        }
    };
}

From source file:it.unibz.instasearch.indexing.StorageIndexer.java

License:Open Source License

/**
 * Deletes the whole index: first empties it through an IndexWriter
 * (deleteAll + commit), then removes every remaining index file from
 * the directory. The whole operation is executed through the retry
 * machinery so transient failures (e.g. files still held open) are
 * retried.
 * @throws Exception
 */
public void deleteIndex() throws Exception {

    RetryingRunnable runnable = new RetryingRunnable() {
        public void run() throws Exception {
            IndexWriter w = createIndexWriter(true); // open for writing and close (make empty)
            w.deleteAll();
            w.commit();
            w.close(true);

            Directory dir = getIndexDir();
            for (String file : dir.listAll()) {
                if (dir.fileExists(file)) // still exists
                {
                    // sync before delete so nothing is buffered for this file
                    dir.sync(file);
                    dir.deleteFile(file);
                }
            }
            dir.close();
        }

        // Always returning true tells the retry loop to try again after
        // any failure rather than aborting.
        public boolean handleException(Throwable e) {
            return true;
        }
    };

    changeListener.onIndexReset(); // close searcher because index is deleted

    runRetryingRunnable(runnable); // delete index with retry
}

From source file:org.apache.solr.core.TestDirectoryFactory.java

License:Apache License

/**
 * Greybox check of {@code DirectoryFactory.exists} semantics for the given
 * factory class: the path must not exist before any file is written, must
 * exist once a file has been synced, and must survive release of the
 * Directory.
 *
 * @param clazz the concrete DirectoryFactory implementation to exercise
 * @throws Exception if the factory cannot be constructed or an IO error occurs
 */
private void testExistsBehavior(Class<? extends DirectoryFactory> clazz) throws Exception {
    final String path = createTempDir().toString() + "/" + clazz + "_somedir";
    DirectoryFactory dirFac = null;
    try {
        // Class.newInstance() is deprecated and propagates checked
        // constructor exceptions unchecked; use the declared no-arg
        // constructor instead.
        dirFac = clazz.getDeclaredConstructor().newInstance();
        dirFac.initCoreContainer(null); // greybox testing directly against path
        dirFac.init(new NamedList());

        assertFalse(path + " should not exist yet", dirFac.exists(path));
        Directory dir = dirFac.get(path, DirectoryFactory.DirContext.DEFAULT,
                DirectoryFactory.LOCK_TYPE_SINGLE);
        try {
            assertFalse(path + " should still not exist", dirFac.exists(path));
            try (IndexOutput file = dir.createOutput("test_file", IOContext.DEFAULT)) {
                file.writeInt(42);

                // TODO: even StandardDirectoryFactory & NRTCachingDirectoryFactory can't agree on this...
                // ... should we consider this explicitly undefined?
                // ... or should *all* Caching DirFactories consult the cache as well as the disk itself?
                // assertFalse(path + " should still not exist until file is closed", dirFac.exists(path));

            } // implicitly close file...

            // TODO: even StandardDirectoryFactory & NRTCachingDirectoryFactory can't agree on this...
            // ... should we consider this explicitly undefined?
            // ... or should *all* Caching DirFactories consult the cache as well as the disk itself?
            // assertTrue(path + " should exist once file is closed", dirFac.exists(path));

            dir.sync(Collections.singleton("test_file"));
            assertTrue(path + " should exist once file is synced", dirFac.exists(path));

        } finally {
            dirFac.release(dir);
        }
        assertTrue(path + " should still exist even after being released", dirFac.exists(path));

    } catch (AssertionError ae) {
        // Preserve the original failure as the cause so its stack trace
        // survives the re-wrap that prefixes the factory class name.
        AssertionError wrapped = new AssertionError(clazz + ": " + ae.getMessage());
        wrapped.initCause(ae);
        throw wrapped;
    } finally {
        if (null != dirFac) {
            dirFac.close();
        }
    }
}

From source file:org.apache.solr.handler.IndexFetcher.java

License:Apache License

/**
 * Helper method to record the last replication's details so that we can show them on the statistics page across
 * restarts.
 *
 * <p>The statistics are persisted as a properties file in the metadata
 * directory. Any failure here is logged and swallowed on purpose: losing a
 * statistics entry must never fail the replication itself.
 *
 * @param modifiedConfFiles config files replicated in this cycle (may be null or empty)
 * @param successfulInstall whether the replication cycle completed successfully
 * @throws IOException on IO error
 */
private void logReplicationTimeAndConfFiles(Collection<Map<String, Object>> modifiedConfFiles,
        boolean successfulInstall) throws IOException {
    List<String> confFiles = new ArrayList<>();
    if (modifiedConfFiles != null && !modifiedConfFiles.isEmpty())
        for (Map<String, Object> map1 : modifiedConfFiles)
            confFiles.add((String) map1.get(NAME));

    Properties props = replicationHandler.loadReplicationProperties();
    long replicationTime = System.currentTimeMillis();
    long replicationTimeTaken = (replicationTime - getReplicationStartTime()) / 1000;
    Directory dir = null;
    try {
        dir = solrCore.getDirectoryFactory().get(solrCore.getDataDir(), DirContext.META_DATA,
                solrCore.getSolrConfig().indexConfig.lockType);

        // Counters start at 1 for the current cycle and accumulate on top of
        // whatever was previously persisted.
        int indexCount = 1, confFilesCount = 1;
        if (props.containsKey(TIMES_INDEX_REPLICATED)) {
            indexCount = Integer.valueOf(props.getProperty(TIMES_INDEX_REPLICATED)) + 1;
        }
        StringBuilder sb = readToStringBuilder(replicationTime, props.getProperty(INDEX_REPLICATED_AT_LIST));
        props.setProperty(INDEX_REPLICATED_AT_LIST, sb.toString());
        props.setProperty(INDEX_REPLICATED_AT, String.valueOf(replicationTime));
        props.setProperty(PREVIOUS_CYCLE_TIME_TAKEN, String.valueOf(replicationTimeTaken));
        props.setProperty(TIMES_INDEX_REPLICATED, String.valueOf(indexCount));
        if (modifiedConfFiles != null && !modifiedConfFiles.isEmpty()) {
            props.setProperty(CONF_FILES_REPLICATED, confFiles.toString());
            props.setProperty(CONF_FILES_REPLICATED_AT, String.valueOf(replicationTime));
            if (props.containsKey(TIMES_CONFIG_REPLICATED)) {
                confFilesCount = Integer.valueOf(props.getProperty(TIMES_CONFIG_REPLICATED)) + 1;
            }
            props.setProperty(TIMES_CONFIG_REPLICATED, String.valueOf(confFilesCount));
        }

        props.setProperty(LAST_CYCLE_BYTES_DOWNLOADED, String.valueOf(getTotalBytesDownloaded()));
        if (!successfulInstall) {
            int numFailures = 1;
            if (props.containsKey(TIMES_FAILED)) {
                numFailures = Integer.valueOf(props.getProperty(TIMES_FAILED)) + 1;
            }
            props.setProperty(TIMES_FAILED, String.valueOf(numFailures));
            props.setProperty(REPLICATION_FAILED_AT, String.valueOf(replicationTime));
            sb = readToStringBuilder(replicationTime, props.getProperty(REPLICATION_FAILED_AT_LIST));
            props.setProperty(REPLICATION_FAILED_AT_LIST, sb.toString());
        }

        // Write the properties file and sync it so the statistics survive a
        // crash; the writer chain closes the underlying IndexOutput.
        final IndexOutput out = dir.createOutput(REPLICATION_PROPERTIES, DirectoryFactory.IOCONTEXT_NO_CACHE);
        Writer outFile = new OutputStreamWriter(new PropertiesOutputStream(out), StandardCharsets.UTF_8);
        try {
            props.store(outFile, "Replication details");
            dir.sync(Collections.singleton(REPLICATION_PROPERTIES));
        } finally {
            IOUtils.closeQuietly(outFile);
        }
    } catch (Exception e) {
        // Best effort: statistics must never fail the replication.
        LOG.warn("Exception while updating statistics", e);
    } finally {
        if (dir != null) {
            solrCore.getDirectoryFactory().release(dir);
        }
    }
}

From source file:org.apache.solr.handler.IndexFetcher.java

License:Apache License

/**
 * If the index is stale by any chance, load index from a different dir in the data dir.
 *
 * <p>Rewrites index.properties so its "index" entry points at the freshly
 * installed index directory: existing properties are loaded (best effort),
 * the old file is deleted, and the new one is written and synced.
 *
 * @param tmpIdxDirName name of the newly installed index directory
 * @return true if the properties file was written successfully
 */
private boolean modifyIndexProps(String tmpIdxDirName) {
    LOG.info("New index installed. Updating index properties... index=" + tmpIdxDirName);
    Properties p = new Properties();
    Directory dir = null;
    try {
        dir = solrCore.getDirectoryFactory().get(solrCore.getDataDir(), DirContext.META_DATA,
                solrCore.getSolrConfig().indexConfig.lockType);
        if (slowFileExists(dir, IndexFetcher.INDEX_PROPERTIES)) {
            final IndexInput input = dir.openInput(IndexFetcher.INDEX_PROPERTIES,
                    DirectoryFactory.IOCONTEXT_NO_CACHE);

            final InputStream is = new PropertiesInputStream(input);
            try {
                p.load(new InputStreamReader(is, StandardCharsets.UTF_8));
            } catch (Exception e) {
                // Unreadable existing properties are logged and overwritten.
                LOG.error("Unable to load " + IndexFetcher.INDEX_PROPERTIES, e);
            } finally {
                IOUtils.closeQuietly(is);
            }
        }
        try {
            // Delete before recreate; some Directory impls refuse to
            // overwrite an existing file.
            dir.deleteFile(IndexFetcher.INDEX_PROPERTIES);
        } catch (IOException e) {
            // no problem
        }
        final IndexOutput out = dir.createOutput(IndexFetcher.INDEX_PROPERTIES,
                DirectoryFactory.IOCONTEXT_NO_CACHE);
        p.put("index", tmpIdxDirName);
        Writer os = null;
        try {
            os = new OutputStreamWriter(new PropertiesOutputStream(out), StandardCharsets.UTF_8);
            p.store(os, IndexFetcher.INDEX_PROPERTIES);
            // Sync so the pointer to the new index survives a crash.
            dir.sync(Collections.singleton(INDEX_PROPERTIES));
        } catch (Exception e) {
            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                    "Unable to write " + IndexFetcher.INDEX_PROPERTIES, e);
        } finally {
            IOUtils.closeQuietly(os);
        }
        return true;

    } catch (IOException e1) {
        throw new RuntimeException(e1);
    } finally {
        if (dir != null) {
            try {
                solrCore.getDirectoryFactory().release(dir);
            } catch (IOException e) {
                SolrException.log(LOG, "", e);
            }
        }
    }

}

From source file:org.apache.solr.handler.SnapPuller.java

License:Apache License

/**
 * Helper method to record the last replication's details so that we can show them on the statistics page across
 * restarts.
 *
 * <p>The statistics are persisted as a properties file in the metadata
 * directory. Any failure here is logged and swallowed on purpose: losing a
 * statistics entry must never fail the replication itself.
 *
 * @param modifiedConfFiles config files replicated in this cycle (may be null or empty)
 * @param successfulInstall whether the replication cycle completed successfully
 * @throws IOException on IO error
 */
private void logReplicationTimeAndConfFiles(Collection<Map<String, Object>> modifiedConfFiles,
        boolean successfulInstall) throws IOException {
    List<String> confFiles = new ArrayList<String>();
    if (modifiedConfFiles != null && !modifiedConfFiles.isEmpty())
        for (Map<String, Object> map1 : modifiedConfFiles)
            confFiles.add((String) map1.get(NAME));

    Properties props = replicationHandler.loadReplicationProperties();
    long replicationTime = System.currentTimeMillis();
    long replicationTimeTaken = (replicationTime - getReplicationStartTime()) / 1000;
    Directory dir = null;
    try {
        dir = solrCore.getDirectoryFactory().get(solrCore.getDataDir(), DirContext.META_DATA,
                solrCore.getSolrConfig().indexConfig.lockType);

        // Counters start at 1 for the current cycle and accumulate on top of
        // whatever was previously persisted.
        int indexCount = 1, confFilesCount = 1;
        if (props.containsKey(TIMES_INDEX_REPLICATED)) {
            indexCount = Integer.valueOf(props.getProperty(TIMES_INDEX_REPLICATED)) + 1;
        }
        StringBuffer sb = readToStringBuffer(replicationTime, props.getProperty(INDEX_REPLICATED_AT_LIST));
        props.setProperty(INDEX_REPLICATED_AT_LIST, sb.toString());
        props.setProperty(INDEX_REPLICATED_AT, String.valueOf(replicationTime));
        props.setProperty(PREVIOUS_CYCLE_TIME_TAKEN, String.valueOf(replicationTimeTaken));
        props.setProperty(TIMES_INDEX_REPLICATED, String.valueOf(indexCount));
        if (modifiedConfFiles != null && !modifiedConfFiles.isEmpty()) {
            props.setProperty(CONF_FILES_REPLICATED, confFiles.toString());
            props.setProperty(CONF_FILES_REPLICATED_AT, String.valueOf(replicationTime));
            if (props.containsKey(TIMES_CONFIG_REPLICATED)) {
                confFilesCount = Integer.valueOf(props.getProperty(TIMES_CONFIG_REPLICATED)) + 1;
            }
            props.setProperty(TIMES_CONFIG_REPLICATED, String.valueOf(confFilesCount));
        }

        props.setProperty(LAST_CYCLE_BYTES_DOWNLOADED, String.valueOf(getTotalBytesDownloaded(this)));
        if (!successfulInstall) {
            int numFailures = 1;
            if (props.containsKey(TIMES_FAILED)) {
                numFailures = Integer.valueOf(props.getProperty(TIMES_FAILED)) + 1;
            }
            props.setProperty(TIMES_FAILED, String.valueOf(numFailures));
            props.setProperty(REPLICATION_FAILED_AT, String.valueOf(replicationTime));
            sb = readToStringBuffer(replicationTime, props.getProperty(REPLICATION_FAILED_AT_LIST));
            props.setProperty(REPLICATION_FAILED_AT_LIST, sb.toString());
        }

        // Write the properties file and sync it so the statistics survive a
        // crash; the writer chain closes the underlying IndexOutput.
        final IndexOutput out = dir.createOutput(REPLICATION_PROPERTIES, DirectoryFactory.IOCONTEXT_NO_CACHE);
        Writer outFile = new OutputStreamWriter(new PropertiesOutputStream(out), CHARSET_UTF_8);
        try {
            props.store(outFile, "Replication details");
            dir.sync(Collections.singleton(REPLICATION_PROPERTIES));
        } finally {
            IOUtils.closeQuietly(outFile);
        }
    } catch (Exception e) {
        // Best effort: statistics must never fail the replication.
        LOG.warn("Exception while updating statistics", e);
    } finally {
        if (dir != null) {
            solrCore.getDirectoryFactory().release(dir);
        }
    }
}

From source file:org.apache.solr.handler.SnapPuller.java

License:Apache License

/**
 * If the index is stale by any chance, load index from a different dir in the data dir.
 *
 * <p>Rewrites index.properties so its "index" entry points at the freshly
 * installed index directory: existing properties are loaded (best effort),
 * the old file is deleted, and the new one is written and synced.
 *
 * @param tmpIdxDirName name of the newly installed index directory
 * @return true if the properties file was written successfully
 */
private boolean modifyIndexProps(String tmpIdxDirName) {
    LOG.info("New index installed. Updating index properties... index=" + tmpIdxDirName);
    Properties p = new Properties();
    Directory dir = null;
    try {
        dir = solrCore.getDirectoryFactory().get(solrCore.getDataDir(), DirContext.META_DATA,
                solrCore.getSolrConfig().indexConfig.lockType);
        if (dir.fileExists(SnapPuller.INDEX_PROPERTIES)) {
            final IndexInput input = dir.openInput(SnapPuller.INDEX_PROPERTIES,
                    DirectoryFactory.IOCONTEXT_NO_CACHE);

            final InputStream is = new PropertiesInputStream(input);
            try {
                p.load(new InputStreamReader(is, CHARSET_UTF_8));
            } catch (Exception e) {
                // Unreadable existing properties are logged and overwritten.
                LOG.error("Unable to load " + SnapPuller.INDEX_PROPERTIES, e);
            } finally {
                IOUtils.closeQuietly(is);
            }
        }
        try {
            // Delete before recreate; some Directory impls refuse to
            // overwrite an existing file.
            dir.deleteFile(SnapPuller.INDEX_PROPERTIES);
        } catch (IOException e) {
            // no problem
        }
        final IndexOutput out = dir.createOutput(SnapPuller.INDEX_PROPERTIES,
                DirectoryFactory.IOCONTEXT_NO_CACHE);
        p.put("index", tmpIdxDirName);
        Writer os = null;
        try {
            os = new OutputStreamWriter(new PropertiesOutputStream(out), CHARSET_UTF_8);
            p.store(os, SnapPuller.INDEX_PROPERTIES);
            // Sync so the pointer to the new index survives a crash.
            dir.sync(Collections.singleton(INDEX_PROPERTIES));
        } catch (Exception e) {
            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                    "Unable to write " + SnapPuller.INDEX_PROPERTIES, e);
        } finally {
            IOUtils.closeQuietly(os);
        }
        return true;

    } catch (IOException e1) {
        throw new RuntimeException(e1);
    } finally {
        if (dir != null) {
            try {
                solrCore.getDirectoryFactory().release(dir);
            } catch (IOException e) {
                SolrException.log(LOG, "", e);
            }
        }
    }

}

From source file:org.elasticsearch.common.lucene.store.SwitchDirectory.java

License:Apache License

/**
 * Force-syncs a single file on whichever underlying directory owns it,
 * preferring the dedicated {@code ForceSyncDirectory} hook when available.
 */
@Override
public void forceSync(String name) throws IOException {
    final Directory delegate = getDirectory(name);
    if (delegate instanceof ForceSyncDirectory) {
        // The delegate exposes a dedicated force-sync hook; use it.
        ((ForceSyncDirectory) delegate).forceSync(name);
        return;
    }
    // Fall back to the regular per-file sync.
    delegate.sync(name);
}

From source file:org.elasticsearch.index.store.DistributorDirectory.java

License:Apache License

/**
 * Syncs the given files on every underlying directory. Which directory
 * holds which file is not tracked here, so the request is forwarded to
 * all of them; this can be a long-running operation.
 */
@Override
public void sync(Collection<String> names) throws IOException {
    for (final Directory delegate : distributor.all()) {
        delegate.sync(names);
    }
}

From source file:org.elasticsearch.util.lucene.Directories.java

License:Apache License

/**
 * Copies a local file into the given Lucene directory under {@code fileName}.
 * FSDirectory targets are copied file-to-file on disk; other Directory
 * implementations are written through createOutput. Afterwards the file is
 * force-synced (or synced) so the copy is durable.
 *
 * NOTE(review): the FileInputStream created on the non-FSDirectory path is
 * presumably closed by the copyToDirectory(InputStream, IndexOutput)
 * overload — TODO confirm, otherwise this leaks a file handle.
 */
public static void copyToDirectory(File copyFrom, Directory dir, String fileName) throws IOException {
    if (dir instanceof FSDirectory) {
        File destinationFile = new File(((FSDirectory) dir).getFile(), fileName);
        if (!destinationFile.exists()) {
            destinationFile.createNewFile();
        }
        copyFile(copyFrom, destinationFile);
    } else {
        copyToDirectory(new FileInputStream(copyFrom), dir.createOutput(fileName));
    }
    if (dir instanceof ForceSyncDirectory) {
        ((ForceSyncDirectory) dir).forceSync(fileName);
    } else {
        dir.sync(fileName);
    }
}