Example usage for org.apache.lucene.index IndexWriter WRITE_LOCK_NAME

List of usage examples for org.apache.lucene.index IndexWriter WRITE_LOCK_NAME

Introduction

In this page you can find the example usage for org.apache.lucene.index IndexWriter WRITE_LOCK_NAME.

Prototype

String WRITE_LOCK_NAME

To view the source code for org.apache.lucene.index IndexWriter WRITE_LOCK_NAME, click the Source Link below.

Click Source Link

Document

Name of the write lock in the index.

Usage

From source file:com.github.lucene.store.database.DatabaseDirectoryITest.java

License:Apache License

@Test
public void obtainLock_whenLockFileNotFound_shouldReturnLock() throws IOException {
    // With no existing lock entry, obtaining the write lock must succeed and
    // return the database-backed lock implementation.
    final Lock acquired = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME);
    Assert.assertNotNull(acquired);
    Assert.assertTrue(acquired instanceof DatabaseReadWriteLockFactory.DatabaseReadWriteLock);
    acquired.close();
}

From source file:com.github.lucene.store.database.DatabaseDirectoryITest.java

License:Apache License

@Test(expected = LockObtainFailedException.class)
public void obtainLock_whenLockFileFound_shouldThrowLockObtainFailedException() throws IOException {
    // While the write lock is held, a second obtainLock attempt must fail;
    // the expected LockObtainFailedException propagates out of the try body.
    final Lock held = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME);
    try {
        directory.obtainLock(IndexWriter.WRITE_LOCK_NAME);
    } finally {
        held.close();
    }
}

From source file:com.github.lucene.store.database.DatabaseDirectoryITest.java

License:Apache License

@Test
public void obtainLock_whenLockFileFoundButIsClosed_shouldReturnNewLock() throws IOException {
    // After a previously acquired write lock is closed, the directory must
    // hand out a fresh lock under the same name.
    final Lock first = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME);
    Assert.assertNotNull(first);
    first.close();

    final Lock second = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME);
    Assert.assertNotNull(second);
    second.close();
}

From source file:com.github.lucene.store.jdbc.JdbcDirectoryLockITest.java

License:Apache License

@Test
public void testLocks() throws Exception {
    try {
        // First owner: acquire the write lock through dir1 and verify it holds.
        final Connection firstConnection = DataSourceUtils.getConnection(dataSource);
        final Lock firstLock = dir1.obtainLock(IndexWriter.WRITE_LOCK_NAME);
        firstLock.ensureValid();

        // While firstLock is held, dir2 must be refused the same lock.
        try {
            dir2.obtainLock(IndexWriter.WRITE_LOCK_NAME);
            Assert.fail("lock2 should not have valid lock");
        } catch (final IOException expected) {
            // expected: the write lock is already held via dir1
        }

        firstLock.close();

        DataSourceUtils.commitConnectionIfPossible(firstConnection);
        DataSourceUtils.releaseConnection(firstConnection);

        // After release, dir2 can acquire and validate the lock normally.
        final Connection secondConnection = DataSourceUtils.getConnection(dataSource);
        final Lock secondLock = dir2.obtainLock(IndexWriter.WRITE_LOCK_NAME);
        secondLock.ensureValid();
        secondLock.close();

        DataSourceUtils.commitConnectionIfPossible(secondConnection);
        DataSourceUtils.releaseConnection(secondConnection);

    } finally {
        dir1.delete();
    }
}

From source file:com.github.lucene.store.jdbc.lock.SelectForUpdateLock.java

License:Apache License

@Override
public void initializeDatabase(final JdbcDirectory jdbcDirectory) throws IOException {
    // Seed the directory table with a single row named after Lucene's write
    // lock, so that lock acquisition can later target this row. NOTE(review):
    // presumably via a "select ... for update nowait" on the row — confirm
    // against the lock-obtain code of this class.
    jdbcDirectory.getJdbcTemplate().executeUpdate(jdbcDirectory.getTable().sqlInsert(),
            new JdbcTemplate.PrepateStatementAwareCallback() {
                @Override
                public void fillPrepareStatement(final PreparedStatement ps) throws Exception {
                    ps.setFetchSize(1);
                    // Placeholder order matches JdbcTable.sqlInsert:
                    // 1=name, 2=value, 3=size, 4=deleted (last-modified is
                    // filled by the database's current-timestamp function).
                    ps.setString(1, IndexWriter.WRITE_LOCK_NAME);
                    ps.setNull(2, Types.BLOB); // lock row carries no content
                    ps.setLong(3, 0); // zero size
                    ps.setBoolean(4, false); // not soft-deleted
                }
            });
}

From source file:com.github.lucene.store.jdbc.support.JdbcTable.java

License:Apache License

/**
 * Builds the table description and pre-computes, once, every SQL statement
 * the JDBC directory issues against this table.
 *
 * Improvement: the single reused, synchronized {@code StringBuffer} is
 * replaced by plain string concatenation (plus one local
 * {@code StringBuilder} for the conditional drop statement); the generated
 * SQL text is byte-for-byte unchanged.
 *
 * @param settings directory settings supplying column names and lengths
 * @param dialect  database dialect used to render column types and SQL fragments
 * @param name     table name (stored via {@code setName})
 * @param catalog  catalog part of the qualified name (stored via {@code setCatalog})
 * @param schema   schema part of the qualified name (stored via {@code setSchema})
 */
public JdbcTable(final JdbcDirectorySettings settings, final Dialect dialect, final String name,
        final String catalog, final String schema) {
    this.dialect = dialect;
    this.settings = settings;
    setName(name);
    setSchema(schema);
    setCatalog(catalog);

    // Table layout: file name (primary key), BLOB content, size,
    // last-modified timestamp and a soft-delete flag.
    nameColumn = new JdbcColumn(dialect, settings.getNameColumnName(), 1,
            dialect.getVarcharType(settings.getNameColumnLength()));
    valueColumn = new JdbcColumn(dialect, settings.getValueColumnName(), 2,
            dialect.getBlobType(settings.getValueColumnLengthInK()));
    sizeColumn = new JdbcColumn(dialect, settings.getSizeColumnName(), 3, dialect.getNumberType());
    lastModifiedColumn = new JdbcColumn(dialect, settings.getLastModifiedColumnName(), 4,
            dialect.getTimestampType());
    deletedColumn = new JdbcColumn(dialect, settings.getDeletedColumnName(), 5, dialect.getBitType());

    sqlCreate = "create table " + getQualifiedName() + " (" + nameColumn.getName() + ' '
            + nameColumn.getType() + " not null, " + valueColumn.getName() + ' ' + valueColumn.getType()
            + " , " + sizeColumn.getName() + ' ' + sizeColumn.getType() + " , "
            + lastModifiedColumn.getName() + ' ' + lastModifiedColumn.getType() + " , "
            + deletedColumn.getName() + ' ' + deletedColumn.getType() + ", primary key ("
            + nameColumn.getName() + ") ) " + getTableTypeString(dialect);

    // "if exists" placement differs per database, so the drop statement
    // needs branching and keeps a builder.
    final StringBuilder drop = new StringBuilder("drop table ");
    if (dialect.supportsIfExistsBeforeTableName()) {
        drop.append("if exists ");
    }
    drop.append(getQualifiedName()).append(dialect.getCascadeConstraintsString());
    if (dialect.supportsIfExistsAfterTableName()) {
        drop.append(" if exists");
    }
    sqlDrop = drop.toString();

    sqlSelectNames = "select " + nameColumn.getQuotedName() + " from " + getQualifiedName() + " where "
            + deletedColumn.getQuotedName() + " = ?";

    sqlSelectNameExists = "select " + deletedColumn.getQuotedName() + " from " + getQualifiedName()
            + " where " + nameColumn.getQuotedName() + " = ?";

    sqlSelecltLastModifiedByName = "select " + lastModifiedColumn.getQuotedName() + " from "
            + getQualifiedName() + " where " + nameColumn.getQuotedName() + " = ?";

    sqlUpdateLastModifiedByName = "update " + getQualifiedName() + " set "
            + lastModifiedColumn.getQuotedName() + " = " + dialect.getCurrentTimestampFunction()
            + " where " + nameColumn.getQuotedName() + " = ?";

    sqlDeleteByName = "delete from " + getQualifiedName() + " where " + nameColumn.getQuotedName() + " = ?";

    sqlDeletaMarkDeleteByDelta = "delete from " + getQualifiedName() + " where "
            + deletedColumn.getQuotedName() + " = ? and " + lastModifiedColumn.getQuotedName() + " < ?";

    sqlUpdateNameByName = "update " + getQualifiedName() + " set " + nameColumn.getQuotedName()
            + " = ? where " + nameColumn.getQuotedName() + " = ?";

    sqlSelectNameForUpdateNoWait = "select " + nameColumn.getQuotedName() + " from " + getQualifiedName()
            + " where " + nameColumn.getQuotedName() + " = ?" + dialect.getForUpdateNowaitString();

    sqlSelectSizeByName = "select " + sizeColumn.getQuotedName() + " from " + getQualifiedName()
            + " where " + nameColumn.getQuotedName() + " = ?";

    // last-modified is filled by the database's current-timestamp function,
    // so the insert carries only four placeholders: name, value, size, deleted.
    sqlInsert = "insert into " + getQualifiedName() + " (" + nameColumn.getQuotedName() + ", "
            + valueColumn.getQuotedName() + ", " + sizeColumn.getQuotedName() + ", "
            + lastModifiedColumn.getQuotedName() + ", " + deletedColumn.getQuotedName()
            + ") values ( ?, ?, ?, " + dialect.getCurrentTimestampFunction() + ", ? )";

    sqlUpdateSizeLastModifiedByName = "update " + getQualifiedName() + " set "
            + sizeColumn.getQuotedName() + " = ? , " + lastModifiedColumn.getQuotedName() + " = "
            + dialect.getCurrentTimestampFunction() + " where " + nameColumn.getQuotedName() + " = ?";

    sqlMarkDeleteByName = "update " + getQualifiedName() + " set " + deletedColumn.getQuotedName()
            + " = ? , " + lastModifiedColumn.getQuotedName() + " = "
            + dialect.getCurrentTimestampFunction() + " where " + nameColumn.getQuotedName() + " = ?";

    sqlSelectSizeValueByName = "select " + nameColumn.getQuotedName() + ", "
            + dialect.openBlobSelectQuote() + valueColumn.getQuotedName() + dialect.closeBlobSelectQuote()
            + " as x, " + sizeColumn.getQuotedName() + " from " + getQualifiedName() + " where "
            + nameColumn.getQuotedName() + " = ?";

    // The Lucene write-lock row must survive "delete all"; everything else goes.
    sqlDeletaAll = "delete from " + getQualifiedName() + " where " + nameColumn.getQuotedName() + " <> '"
            + IndexWriter.WRITE_LOCK_NAME + "'";
}

From source file:net.homeip.donaldm.doxmentor4j.indexers.IndexFactory.java

License:Open Source License

static public void create(File archiveFile, String archiveIndexDirName, String indexDirName, Analyzer analyzer,
        boolean isCreate, boolean isReadOnly) throws IOException
//---------------------------------------------------------------------------
{
    // (Re)initializes the factory's static Lucene state: close any previous
    // writer and directory, then open a new Directory either inside an
    // archive (archiveIndexDirName) or on the local filesystem
    // (indexDirName), and finally open a writer unless read-only.
    if ((m_indexWriter != null) && (!isReadOnly))
        try {
            m_indexWriter.close();
        } catch (Exception e) {
            // best-effort close of the previous writer; failure is ignored
        }
    boolean isClosed = false;
    if (mLuceneDirectory == null)
        isClosed = true;
    else
        try {
            mLuceneDirectory.close();
            isClosed = true;
        } catch (Exception e) {
            // close failed: isClosed stays false, so the filesystem branch
            // below keeps the existing directory instance
        }
    m_analyzer = analyzer;
    if (archiveIndexDirName != null) {
        // Archive-backed index: if the old directory could not be closed,
        // clear its stale write lock before reopening from the archive file.
        if ((!isClosed) && (mLuceneDirectory != null))
            ((ArchiveDirectory) mLuceneDirectory).clearLock(IndexWriter.WRITE_LOCK_NAME);
        mLuceneDirectory = ArchiveDirectory.getDirectory(archiveFile, archiveIndexDirName, isCreate);
        mDirectory = ((ArchiveDirectory) mLuceneDirectory).getTempDirectory();
    } else if (indexDirName != null) {
        // Filesystem-backed index: ensure indexDirName exists as a directory.
        java.io.File f = new File(indexDirName);
        if ((f.exists()) && (!f.isDirectory()))
            f.delete();
        if (!f.exists())
            f.mkdirs();
        // Probe writability by creating a temp file; if that fails the media
        // is treated as read-only and Lucene locks are disabled.
        File ff;
        try {
            ff = File.createTempFile("tst", ".tmp", f);
        } catch (Exception _e) {
            ff = null;
        }
        if ((ff == null) || (!ff.exists())) {
            System.out.println("Read-Only media for " + indexDirName + ". Disabling locks");
            logger.info("Read-Only media for " + indexDirName + ". Disabling locks");
            if (isClosed) {
                mLuceneDirectory = FSDirectory.open(f, NoLockFactory.getNoLockFactory());
                mDirectory = f;
            }
        } else {
            if (ff != null)
                ff.delete();
            if (isClosed) {
                mLuceneDirectory = FSDirectory.open(f);
                mDirectory = f;
            }
        }
    } else
        throw new IOException("Index directory invalid");
    if (!isReadOnly)
        m_indexWriter = new IndexWriter(mLuceneDirectory, m_analyzer, isCreate,
                IndexWriter.MaxFieldLength.UNLIMITED);
}

From source file:org.apache.maven.index.context.DefaultIndexingContext.java

License:Apache License

private static void unlockForcibly(final TrackingLockFactory lockFactory, final Directory dir)
        throws IOException {
    // Best-effort replacement for IndexWriter.unlock, which is gone in
    // Lucene >= 5.3 (and was always strongly discouraged). Simulates it for
    // FSDirectory with an FSLockFactory; the RAMDirectory case is covered by
    // the emitted-locks path below, since it uses SingleInstanceLockFactory.
    // First release any write locks this JVM handed out via the custom
    // tracking lock factory.
    if (lockFactory != null) {
        final Set<? extends Lock> emittedLocks = lockFactory.getEmittedLocks(IndexWriter.WRITE_LOCK_NAME);
        for (Lock emittedLock : emittedLocks) {
            emittedLock.close();
        }
    }
    if (dir instanceof FSDirectory) {
        final FSDirectory fsdir = (FSDirectory) dir;
        final Path dirPath = fsdir.getDirectory();
        if (Files.isDirectory(dirPath)) {
            Path lockPath = dirPath.resolve(IndexWriter.WRITE_LOCK_NAME);
            try {
                lockPath = lockPath.toRealPath();
            } catch (IOException ioe) {
                // Lock file does not exist -> nothing is locked.
                return;
            }
            // Probe the lock file with an OS-level FileLock: if tryLock
            // returns null, another process still holds the write lock.
            try (final FileChannel fc = FileChannel.open(lockPath, StandardOpenOption.CREATE,
                    StandardOpenOption.WRITE)) {
                final FileLock lck = fc.tryLock();
                if (lck == null) {
                    // Still active in another process -> refuse to unlock.
                    throw new LockObtainFailedException("Lock held by another process: " + lockPath);
                } else {
                    // Not held; fine to release our probe lock.
                    lck.close();
                }
            }
            Files.delete(lockPath);
        }
    }
}

From source file:org.codelibs.elasticsearch.common.lucene.Lucene.java

License:Apache License

/**
 * This method removes all files from the given directory that are not referenced by the given segments file.
 * This method will open an IndexWriter and relies on index file deleter to remove all unreferenced files. Segment files
 * that are newer than the given segments file are removed forcefully to prevent problems with IndexWriter opening a potentially
 * broken commit point / leftover.
 * <b>Note:</b> this method will fail if there is another IndexWriter open on the given directory. This method will also acquire
 * a write lock from the directory while pruning unused files. This method expects an existing index in the given directory that has
 * the given segments file.
 */
public static SegmentInfos pruneUnreferencedFiles(String segmentsFileName, Directory directory)
        throws IOException {
    final SegmentInfos si = readSegmentInfos(segmentsFileName, directory);
    try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        int foundSegmentFiles = 0;
        for (final String file : directory.listAll()) {
            /**
             * we could also use a deletion policy here but in the case of snapshot and restore
             * sometimes we restore an index and override files that were referenced by a "future"
             * commit. If such a commit is opened by the IW it would likely throw a corrupted index exception
             * since checksums don't match anymore. That's why we prune the name here directly.
             * We also want the caller to know if we were not able to remove a segments_N file.
             */
            if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
                foundSegmentFiles++;
                if (file.equals(si.getSegmentsFileName()) == false) {
                    directory.deleteFile(file); // remove all segments_N files except the one we keep
                }
            }
        }
        assert SegmentInfos.getLastCommitSegmentsFileName(directory).equals(segmentsFileName);
        if (foundSegmentFiles == 0) {
            throw new IllegalStateException("no commit found in the directory");
        }
    }
    // Open and immediately close a writer pinned (via IndexCommit) to the
    // kept commit; closing triggers IndexFileDeleter, which removes all
    // remaining pending files.
    final CommitPoint cp = new CommitPoint(si, directory);
    try (IndexWriter writer = new IndexWriter(directory,
            new IndexWriterConfig(Lucene.STANDARD_ANALYZER).setIndexCommit(cp).setCommitOnClose(false)
                    .setMergePolicy(NoMergePolicy.INSTANCE).setOpenMode(IndexWriterConfig.OpenMode.APPEND))) {
        // do nothing and close; this will kick off IndexFileDeleter which will remove all pending files
    }
    return si;
}

From source file:org.codelibs.elasticsearch.common.lucene.Lucene.java

License:Apache License

/**
 * This method removes all lucene files from the given directory. It will first try to delete all commit points / segments
 * files to ensure broken commits or corrupted indices will not be opened in the future. If any of the segment files can't be deleted
 * this operation fails./*from   w  w  w. j  a v  a2  s.c o  m*/
 */
public static void cleanLuceneIndex(Directory directory) throws IOException {
    try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        for (final String file : directory.listAll()) {
            if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
                directory.deleteFile(file); // remove all segment_N files
            }
        }
    }
    try (IndexWriter writer = new IndexWriter(directory,
            new IndexWriterConfig(Lucene.STANDARD_ANALYZER).setMergePolicy(NoMergePolicy.INSTANCE) // no merges
                    .setCommitOnClose(false) // no commits
                    .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) // force creation - don't append...
    {
        // do nothing and close this will kick of IndexFileDeleter which will remove all pending files
    }
}