Example usage for org.apache.hadoop.fs FileSystem getLocal

Introduction

This page shows example usages of org.apache.hadoop.fs.FileSystem.getLocal, collected from real-world projects.

Prototype

public static LocalFileSystem getLocal(Configuration conf) throws IOException 

Document

Get the local FileSystem.
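Before the project examples under Usage below, here is a minimal, self-contained sketch of the call itself. The class name and file path are illustrative only, not taken from any project on this page.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class GetLocalExample {
    public static void main(String[] args) throws IOException {
        // Obtain a FileSystem view of the local disk.
        LocalFileSystem local = FileSystem.getLocal(new Configuration());

        // Round-trip a small file; the path is illustrative.
        Path path = new Path(System.getProperty("java.io.tmpdir"), "getlocal-example.txt");
        try (FSDataOutputStream out = local.create(path, true)) {
            out.writeUTF("hello");
        }
        System.out.println("exists: " + local.exists(path));
        local.delete(path, false);
    }
}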

Usage

From source file: org.apache.accumulo.server.logger.LogWriter.java

License: Apache License

@Override
public LogCopyInfo startCopy(TInfo info, AuthInfo credentials, final String localLog,
        final String fullyQualifiedFileName, final boolean sort) {
    log.info("Copying " + localLog + " to " + fullyQualifiedFileName);
    final long t1 = System.currentTimeMillis();
    try {
        Long id = file2id.get(localLog);
        if (id != null)
            close(info, id);
    } catch (NoSuchLogIDException e) {
        log.error("Unexpected error thrown", e);
        throw new RuntimeException(e);
    }
    File file;
    try {
        file = new File(findLocalFilename(localLog));
        log.info(file.getAbsoluteFile().toString());
    } catch (FileNotFoundException ex) {
        throw new RuntimeException(ex);
    }
    long result = file.length();

    copyThreadPool.execute(new Runnable() {
        @Override
        public void run() {
            Thread.currentThread().setName("Copying " + localLog + " to shared file system");
            for (int i = 0; i < 3; i++) {
                try {
                    if (sort) {
                        copySortLog(localLog, fullyQualifiedFileName);
                    } else {
                        copyLog(localLog, fullyQualifiedFileName);
                    }
                    return;
                } catch (IOException e) {
                    log.error("error during copy", e);
                }
                UtilWaitThread.sleep(1000);
            }
            log.error("Unable to copy file to DFS, too many retries " + localLog);
            try {
                fs.create(new Path(fullyQualifiedFileName + ".failed")).close();
            } catch (IOException ex) {
                log.error("Unable to create failure flag file", ex);
            }
            long t2 = System.currentTimeMillis();
            if (metrics.isEnabled())
                metrics.add(LogWriterMetrics.copy, (t2 - t1));
        }

        private void copySortLog(String localLog, String fullyQualifiedFileName) throws IOException {
            final long SORT_BUFFER_SIZE = acuConf.getMemoryInBytes(Property.LOGGER_SORT_BUFFER_SIZE);

            FileSystem local = TraceFileSystem.wrap(FileSystem.getLocal(fs.getConf()).getRaw());
            Path dest = new Path(fullyQualifiedFileName + ".recovered");
            log.debug("Sorting log file to DSF " + dest);
            fs.mkdirs(dest);
            int part = 0;

            Reader reader = new SequenceFile.Reader(local, new Path(findLocalFilename(localLog)), fs.getConf());
            try {
                final ArrayList<Pair<LogFileKey, LogFileValue>> kv = new ArrayList<Pair<LogFileKey, LogFileValue>>();
                long memorySize = 0;
                while (true) {
                    final long position = reader.getPosition();
                    final LogFileKey key = new LogFileKey();
                    final LogFileValue value = new LogFileValue();
                    try {
                        if (!reader.next(key, value))
                            break;
                    } catch (EOFException e) {
                        log.warn("Unexpected end of file reading write ahead log " + localLog);
                        break;
                    }
                    kv.add(new Pair<LogFileKey, LogFileValue>(key, value));
                    memorySize += reader.getPosition() - position;
                    if (memorySize > SORT_BUFFER_SIZE) {
                        writeSortedEntries(dest, part++, kv);
                        kv.clear();
                        memorySize = 0;
                    }
                }

                if (!kv.isEmpty())
                    writeSortedEntries(dest, part++, kv);
                fs.create(new Path(dest, "finished")).close();
            } finally {
                reader.close();
            }
        }

        private void writeSortedEntries(Path dest, int part, final List<Pair<LogFileKey, LogFileValue>> kv)
                throws IOException {
            String path = dest + String.format("/part-r-%05d", part);
            log.debug("Writing partial log file to DSF " + path);
            log.debug("Sorting");
            Span span = Trace.start("Logger sort");
            span.data("logfile", dest.getName());
            Collections.sort(kv, new Comparator<Pair<LogFileKey, LogFileValue>>() {
                @Override
                public int compare(Pair<LogFileKey, LogFileValue> o1, Pair<LogFileKey, LogFileValue> o2) {
                    return o1.getFirst().compareTo(o2.getFirst());
                }
            });
            span.stop();
            span = Trace.start("Logger write");
            span.data("logfile", dest.getName());
            MapFile.Writer writer = new MapFile.Writer(fs.getConf(), fs, path, LogFileKey.class,
                    LogFileValue.class);
            short replication = (short) acuConf.getCount(Property.LOGGER_RECOVERY_FILE_REPLICATION);
            fs.setReplication(new Path(path + "/" + MapFile.DATA_FILE_NAME), replication);
            fs.setReplication(new Path(path + "/" + MapFile.INDEX_FILE_NAME), replication);
            try {
                for (Pair<LogFileKey, LogFileValue> entry : kv)
                    writer.append(entry.getFirst(), entry.getSecond());
            } finally {
                writer.close();
                span.stop();
            }
        }

        private void copyLog(final String localLog, final String fullyQualifiedFileName) throws IOException {
            Path dest = new Path(fullyQualifiedFileName + ".copy");
            log.debug("Copying log file to DSF " + dest);
            fs.delete(dest, true);
            LogFileKey key = new LogFileKey();
            LogFileValue value = new LogFileValue();
            Writer writer = null;
            Reader reader = null;
            try {
                short replication = (short) acuConf.getCount(Property.LOGGER_RECOVERY_FILE_REPLICATION);
                writer = SequenceFile.createWriter(fs, fs.getConf(), dest, LogFileKey.class, LogFileValue.class,
                        fs.getConf().getInt("io.file.buffer.size", 4096), replication, fs.getDefaultBlockSize(),
                        SequenceFile.CompressionType.BLOCK, new DefaultCodec(), null, new Metadata());
                FileSystem local = TraceFileSystem.wrap(FileSystem.getLocal(fs.getConf()).getRaw());
                reader = new SequenceFile.Reader(local, new Path(findLocalFilename(localLog)), fs.getConf());
                while (reader.next(key, value)) {
                    writer.append(key, value);
                }
            } catch (IOException ex) {
                log.warn("May have a partial copy of a recovery file: " + localLog, ex);
            } finally {
                if (reader != null)
                    reader.close();
                if (writer != null)
                    writer.close();
            }
            // Make file appear in the shared file system as the target name only after it is completely copied
            fs.rename(dest, new Path(fullyQualifiedFileName));
            log.info("Copying " + localLog + " complete");
        }
    });
    return new LogCopyInfo(result, null);
}
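Note how this example opens local write-ahead logs through FileSystem.getLocal(fs.getConf()).getRaw() rather than using the result of getLocal directly. A short sketch of the distinction follows; it is an aside, not part of the source above, and conf stands in for any Hadoop Configuration.

// getLocal returns LocalFileSystem, a ChecksumFileSystem that maintains
// .crc sidecar files on writes and verifies them on reads.
LocalFileSystem checked = FileSystem.getLocal(conf);

// getRaw() exposes the underlying filesystem, which does plain local I/O
// with no checksum handling, presumably what is wanted when reading WAL
// files that were written without .crc sidecars.
FileSystem raw = checked.getRaw();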

From source file: org.apache.accumulo.server.logger.TestLogWriter.java

License: Apache License

@Before
public void setUp() throws Exception {
    // suppress log messages having to do with not having an instance
    Logger.getLogger(ZooConfiguration.class).setLevel(Level.OFF);
    Logger.getLogger(HdfsZooInstance.class).setLevel(Level.OFF);
    if (fs == null) {
        fs = FileSystem.getLocal(CachedConfiguration.getInstance());
    }
    writer = new LogWriter(ServerConfiguration.getDefaultConfiguration(), fs,
            Collections.singletonList("target"), INSTANCE_ID, 1, false);
    Logger.getLogger(LogWriter.class).setLevel(Level.FATAL);
}

From source file: org.apache.accumulo.server.ServerConstantsTest.java

License: Apache License

private ArrayList<String> init(File newFile, List<String> uuids, List<Integer> dataVersions)
        throws IllegalArgumentException, IOException {
    String base = newFile.toURI().toString();

    LocalFileSystem fs = FileSystem.getLocal(new Configuration());

    ArrayList<String> accumuloPaths = new ArrayList<>();

    for (int i = 0; i < uuids.size(); i++) {
        String volume = "v" + i;

        String accumuloPath = base + "/" + volume + "/accumulo";
        accumuloPaths.add(accumuloPath);

        if (uuids.get(i) != null) {
            fs.mkdirs(new Path(accumuloPath + "/" + ServerConstants.INSTANCE_ID_DIR));
            fs.createNewFile(
                    new Path(accumuloPath + "/" + ServerConstants.INSTANCE_ID_DIR + "/" + uuids.get(i)));
        }

        if (dataVersions.get(i) != null) {
            fs.mkdirs(new Path(accumuloPath + "/" + ServerConstants.VERSION_DIR));
            fs.createNewFile(
                    new Path(accumuloPath + "/" + ServerConstants.VERSION_DIR + "/" + dataVersions.get(i)));
        }
    }

    return accumuloPaths;
}

From source file: org.apache.accumulo.server.tabletserver.MemKeyComparator.java

License: Apache License

public void delete(long waitTime) {

    synchronized (this) {
        if (deleted)
            throw new IllegalStateException("Double delete");

        deleted = true;
    }

    long t1 = System.currentTimeMillis();

    while (activeIters.size() > 0 && System.currentTimeMillis() - t1 < waitTime) {
        UtilWaitThread.sleep(50);
    }

    if (activeIters.size() > 0) {
        // dump memmap exactly as is to a tmp file on disk, and switch scans to that temp file
        try {
            Configuration conf = CachedConfiguration.getInstance();
            FileSystem fs = TraceFileSystem.wrap(FileSystem.getLocal(conf));

            String tmpFile = memDumpDir + "/memDump" + UUID.randomUUID() + "." + RFile.EXTENSION;

            Configuration newConf = new Configuration(conf);
            newConf.setInt("io.seqfile.compress.blocksize", 100000);

            FileSKVWriter out = new RFileOperations().openWriter(tmpFile, fs, newConf,
                    ServerConfiguration.getSiteConfiguration());

            InterruptibleIterator iter = map.skvIterator();

            HashSet<ByteSequence> allfams = new HashSet<ByteSequence>();

            for (Entry<String, Set<ByteSequence>> entry : lggroups.entrySet()) {
                allfams.addAll(entry.getValue());
                out.startNewLocalityGroup(entry.getKey(), entry.getValue());
                iter.seek(new Range(), entry.getValue(), true);
                dumpLocalityGroup(out, iter);
            }

            out.startDefaultLocalityGroup();
            iter.seek(new Range(), allfams, false);

            dumpLocalityGroup(out, iter);

            out.close();

            log.debug("Created mem dump file " + tmpFile);

            memDumpFile = tmpFile;

            synchronized (activeIters) {
                for (MemoryIterator mi : activeIters) {
                    mi.switchNow();
                }
            }

            // rely on unix behavior that file will be deleted when last
            // reader closes it
            fs.delete(new Path(memDumpFile), true);

        } catch (IOException ioe) {
            log.error("Failed to create mem dump file ", ioe);

            while (activeIters.size() > 0) {
                UtilWaitThread.sleep(100);
            }
        }

    }

    SimpleMap tmpMap = map;

    synchronized (this) {
        map = null;
    }

    tmpMap.delete();
}

From source file: org.apache.accumulo.test.functional.WriteAheadLogEncryptedIT.java

License: Apache License

@Override
public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
    String keyPath = System.getProperty("user.dir") + "/target/mini-tests/WriteAheadLogEncryptedIT-testkeyfile";
    cfg.setProperty(Property.INSTANCE_CRYPTO_SERVICE, "org.apache.accumulo.core.cryptoImpl.AESCryptoService");
    cfg.setProperty(INSTANCE_CRYPTO_PREFIX.getKey() + "key.uri", keyPath);

    WriteAheadLogIT.setupConfig(cfg, hadoopCoreSite);

    // setup key file
    try {
        Path keyFile = new Path(keyPath);
        FileSystem fs = FileSystem.getLocal(new Configuration());
        fs.delete(keyFile, true);
        if (fs.createNewFile(keyFile))
            log.info("Created keyfile at {}", keyPath);
        else
            log.error("Failed to create key file at {}", keyPath);

        try (FSDataOutputStream out = fs.create(keyFile)) {
            out.writeUTF("sixteenbytekey"); // 14 + 2 from writeUTF
        }
    } catch (Exception e) {
        log.error("Exception during configure", e);
    }
}

From source file: org.apache.accumulo.test.replication.UnusedWalDoesntCloseReplicationStatusIT.java

License: Apache License

@Test
public void test() throws Exception {
    File accumuloDir = this.getCluster().getConfig().getAccumuloDir();
    final Connector conn = getConnector();
    final String tableName = getUniqueNames(1)[0];

    conn.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
    conn.tableOperations().create(tableName);

    final String tableId = conn.tableOperations().tableIdMap().get(tableName);
    final int numericTableId = Integer.parseInt(tableId);
    final int fakeTableId = numericTableId + 1;

    Assert.assertNotNull("Did not find table ID", tableId);

    conn.tableOperations().setProperty(tableName, Property.TABLE_REPLICATION.getKey(), "true");
    conn.tableOperations().setProperty(tableName, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
    // just sleep
    conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1",
            ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "50000"));

    FileSystem fs = FileSystem.getLocal(new Configuration());
    File tserverWalDir = new File(accumuloDir, ServerConstants.WAL_DIR + Path.SEPARATOR + "faketserver+port");
    File tserverWal = new File(tserverWalDir, UUID.randomUUID().toString());
    fs.mkdirs(new Path(tserverWalDir.getAbsolutePath()));

    // Make a fake WAL with no data in it for our real table
    FSDataOutputStream out = fs.create(new Path(tserverWal.getAbsolutePath()));

    out.write(DfsLogger.LOG_FILE_HEADER_V3.getBytes(UTF_8));

    DataOutputStream dos = new DataOutputStream(out);
    dos.writeUTF("NullCryptoModule");

    // Fake a single update WAL that has a mutation for another table
    LogFileKey key = new LogFileKey();
    LogFileValue value = new LogFileValue();

    key.event = OPEN;
    key.tserverSession = tserverWal.getAbsolutePath();
    key.filename = tserverWal.getAbsolutePath();
    key.write(out);
    value.write(out);

    key.event = LogEvents.DEFINE_TABLET;
    key.tablet = new KeyExtent(Integer.toString(fakeTableId), null, null);
    key.seq = 1L;
    key.tid = 1;

    key.write(dos);
    value.write(dos);

    key.tablet = null;
    key.event = LogEvents.MUTATION;
    key.filename = tserverWal.getAbsolutePath();
    value.mutations = Arrays.<Mutation>asList(new ServerMutation(new Text("row")));

    key.write(dos);
    value.write(dos);

    key.event = LogEvents.COMPACTION_START;
    key.filename = accumuloDir.getAbsolutePath() + "/tables/" + fakeTableId + "/t-000001/A000001.rf";
    value.mutations = Collections.emptyList();

    key.write(dos);
    value.write(dos);

    key.event = LogEvents.COMPACTION_FINISH;
    value.mutations = Collections.emptyList();

    key.write(dos);
    value.write(dos);

    dos.close();

    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("m");
    m.put("m", "m", "M");
    bw.addMutation(m);
    bw.close();

    log.info("State of metadata table after inserting a record");

    Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
    for (Entry<Key, Value> entry : s) {
        System.out.println(entry.getKey().toStringNoTruncate() + " " + entry.getValue());
    }

    s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    s.setRange(MetadataSchema.ReplicationSection.getRange());
    for (Entry<Key, Value> entry : s) {
        System.out.println(entry.getKey().toStringNoTruncate() + " "
                + ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
    }

    log.info("Offline'ing table");

    conn.tableOperations().offline(tableName, true);

    // Add our fake WAL to the log column for this table
    String walUri = tserverWal.toURI().toString();
    KeyExtent extent = new KeyExtent(tableId, null, null);
    bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    m = new Mutation(extent.getMetadataEntry());
    m.put(MetadataSchema.TabletsSection.LogColumnFamily.NAME, new Text("localhost:12345/" + walUri),
            new Value((walUri + "|1").getBytes(UTF_8)));
    bw.addMutation(m);

    // Add a replication entry for our fake WAL
    m = new Mutation(MetadataSchema.ReplicationSection.getRowPrefix() + new Path(walUri).toString());
    m.put(MetadataSchema.ReplicationSection.COLF, new Text(tableId),
            new Value(StatusUtil.fileCreated(System.currentTimeMillis()).toByteArray()));
    bw.addMutation(m);
    bw.close();

    log.info("State of metadata after injecting WAL manually");

    s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
    for (Entry<Key, Value> entry : s) {
        log.info(entry.getKey().toStringNoTruncate() + " " + entry.getValue());
    }

    s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    s.setRange(MetadataSchema.ReplicationSection.getRange());
    for (Entry<Key, Value> entry : s) {
        log.info(entry.getKey().toStringNoTruncate() + " "
                + ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
    }

    log.info("Bringing table online");
    conn.tableOperations().online(tableName, true);

    Assert.assertEquals(1, Iterables.size(conn.createScanner(tableName, Authorizations.EMPTY)));

    log.info("Table has performed recovery, state of metadata:");

    s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
    for (Entry<Key, Value> entry : s) {
        log.info(entry.getKey().toStringNoTruncate() + " " + entry.getValue());
    }

    s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    s.setRange(MetadataSchema.ReplicationSection.getRange());
    for (Entry<Key, Value> entry : s) {
        Status status = Status.parseFrom(entry.getValue().get());
        log.info(entry.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(status));
        Assert.assertFalse("Status record was closed and it should not be", status.getClosed());
    }
}

From source file: org.apache.accumulo.test.ShellServerIT.java

License: Apache License

@Test
public void exporttableImporttable() throws Exception {
    final String table = name.getMethodName(), table2 = table + "2";

    // exporttable / importtable
    ts.exec("createtable " + table + " -evc", true);
    make10();
    ts.exec("addsplits row5", true);
    ts.exec("config -t " + table + " -s table.split.threshold=345M", true);
    ts.exec("offline " + table, true);
    File exportDir = new File(rootPath, "ShellServerIT.export");
    String exportUri = "file://" + exportDir.toString();
    String localTmp = "file://" + new File(rootPath, "ShellServerIT.tmp").toString();
    ts.exec("exporttable -t " + table + " " + exportUri, true);
    DistCp cp = newDistCp(new Configuration(false));
    String import_ = "file://" + new File(rootPath, "ShellServerIT.import").toString();
    if (getCluster().getClientConfig().getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
        // DistCp bugs out trying to get a fs delegation token to perform the cp. Just copy it ourselves by hand.
        FileSystem fs = getCluster().getFileSystem();
        FileSystem localFs = FileSystem.getLocal(new Configuration(false));

        // Path on local fs to cp into
        Path localTmpPath = new Path(localTmp);
        localFs.mkdirs(localTmpPath);

        // Path in remote fs to importtable from
        Path importDir = new Path(import_);
        fs.mkdirs(importDir);

        // Implement a poor-man's DistCp
        try (BufferedReader reader = new BufferedReader(new FileReader(new File(exportDir, "distcp.txt")))) {
            for (String line; (line = reader.readLine()) != null;) {
                Path exportedFile = new Path(line);
                // There isn't a cp on FileSystem??
                log.info("Copying " + line + " to " + localTmpPath);
                fs.copyToLocalFile(exportedFile, localTmpPath);
                Path tmpFile = new Path(localTmpPath, exportedFile.getName());
                log.info("Moving " + tmpFile + " to the import directory " + importDir);
                fs.moveFromLocalFile(tmpFile, importDir);
            }
        }
    } else {
        String[] distCpArgs = new String[] { "-f", exportUri + "/distcp.txt", import_ };
        assertEquals("Failed to run distcp: " + Arrays.toString(distCpArgs), 0, cp.run(distCpArgs));
    }
    ts.exec("importtable " + table2 + " " + import_, true);
    ts.exec("config -t " + table2 + " -np", true, "345M", true);
    ts.exec("getsplits -t " + table2, true, "row5", true);
    ts.exec("constraint --list -t " + table2, true, "VisibilityConstraint=2", true);
    ts.exec("online " + table, true);
    ts.exec("deletetable -f " + table, true);
    ts.exec("deletetable -f " + table2, true);
}

From source file: org.apache.accumulo.tserver.DirectoryDecommissionerTest.java

License: Apache License

@Test
public void testSame() throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());

    Path subdir1 = new Path(tempFolder.newFolder().toURI());
    Path subdir2 = new Path(tempFolder.newFolder().toURI());
    Path subdir3 = new Path(tempFolder.newFolder().toURI());

    Assert.assertFalse(DirectoryDecommissioner.same(fs, subdir1, fs,
            new Path(tempFolder.getRoot().toURI().toString(), "8854339269459287524098238497")));
    Assert.assertFalse(DirectoryDecommissioner.same(fs,
            new Path(tempFolder.getRoot().toURI().toString(), "8854339269459287524098238497"), fs, subdir1));
    Assert.assertTrue(DirectoryDecommissioner.same(fs, subdir1, fs, subdir1));

    writeFile(fs, subdir1, "abc", "foo");
    writeFile(fs, subdir2, "abc", "bar");
    writeFile(fs, subdir3, "abc", "foo");

    Assert.assertTrue(DirectoryDecommissioner.same(fs, subdir1, fs, subdir1));
    Assert.assertFalse(DirectoryDecommissioner.same(fs, subdir1, fs, subdir2));
    Assert.assertFalse(DirectoryDecommissioner.same(fs, subdir2, fs, subdir1));
    Assert.assertTrue(DirectoryDecommissioner.same(fs, subdir1, fs, subdir3));
    Assert.assertTrue(DirectoryDecommissioner.same(fs, subdir3, fs, subdir1));

    writeFile(fs, subdir1, "def", "123456");
    writeFile(fs, subdir2, "def", "123456");
    writeFile(fs, subdir3, "def", "123456");

    Assert.assertTrue(DirectoryDecommissioner.same(fs, subdir1, fs, subdir1));
    Assert.assertFalse(DirectoryDecommissioner.same(fs, subdir1, fs, subdir2));
    Assert.assertFalse(DirectoryDecommissioner.same(fs, subdir2, fs, subdir1));
    Assert.assertTrue(DirectoryDecommissioner.same(fs, subdir1, fs, subdir3));
    Assert.assertTrue(DirectoryDecommissioner.same(fs, subdir3, fs, subdir1));

    writeFile(fs, subdir3, "ghi", "09876");

    Assert.assertFalse(DirectoryDecommissioner.same(fs, subdir1, fs, subdir3));
    Assert.assertFalse(DirectoryDecommissioner.same(fs, subdir3, fs, subdir1));

    fs.mkdirs(new Path(subdir2, "dir1"));

    try {
        DirectoryDecommissioner.same(fs, subdir1, fs, subdir2);
        Assert.fail();
    } catch (IllegalArgumentException e) {
        // expected
    }

    try {
        DirectoryDecommissioner.same(fs, subdir2, fs, subdir1);
        Assert.fail();
    } catch (IllegalArgumentException e) {
        // expected
    }

    try {
        DirectoryDecommissioner.same(fs, subdir1, fs, new Path(subdir2, "def"));
        Assert.fail();
    } catch (IllegalArgumentException e) {
        // expected
    }

    try {
        DirectoryDecommissioner.same(fs, new Path(subdir2, "def"), fs, subdir3);
        Assert.fail();
    } catch (IllegalArgumentException e) {
        // expected
    }

}

From source file: org.apache.accumulo.tserver.InMemoryMap.java

License: Apache License

public void delete(long waitTime) {

    synchronized (this) {
        if (deleted)
            throw new IllegalStateException("Double delete");

        deleted = true;
    }

    long t1 = System.currentTimeMillis();

    while (activeIters.size() > 0 && System.currentTimeMillis() - t1 < waitTime) {
        sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
    }

    if (activeIters.size() > 0) {
        // dump memmap exactly as is to a tmp file on disk, and switch scans to that temp file
        try {
            Configuration conf = CachedConfiguration.getInstance();
            FileSystem fs = FileSystem.getLocal(conf);

            String tmpFile = memDumpDir + "/memDump" + UUID.randomUUID() + "." + RFile.EXTENSION;

            Configuration newConf = new Configuration(conf);
            newConf.setInt("io.seqfile.compress.blocksize", 100000);

            AccumuloConfiguration siteConf = SiteConfiguration.getInstance();

            if (getOrCreateSampler() != null) {
                siteConf = createSampleConfig(siteConf);
            }

            FileSKVWriter out = new RFileOperations().newWriterBuilder().forFile(tmpFile, fs, newConf)
                    .withTableConfiguration(siteConf).build();

            InterruptibleIterator iter = map.skvIterator(null);

            HashSet<ByteSequence> allfams = new HashSet<>();

            for (Entry<String, Set<ByteSequence>> entry : lggroups.entrySet()) {
                allfams.addAll(entry.getValue());
                out.startNewLocalityGroup(entry.getKey(), entry.getValue());
                iter.seek(new Range(), entry.getValue(), true);
                dumpLocalityGroup(out, iter);
            }

            out.startDefaultLocalityGroup();
            iter.seek(new Range(), allfams, false);

            dumpLocalityGroup(out, iter);

            out.close();

            log.debug("Created mem dump file " + tmpFile);

            memDumpFile = tmpFile;

            synchronized (activeIters) {
                for (MemoryIterator mi : activeIters) {
                    mi.switchNow();
                }
            }

            // rely on unix behavior that file will be deleted when last
            // reader closes it
            fs.delete(new Path(memDumpFile), true);

        } catch (IOException ioe) {
            log.error("Failed to create mem dump file ", ioe);

            while (activeIters.size() > 0) {
                sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
            }
        }

    }

    SimpleMap tmpMap = map;

    synchronized (this) {
        map = null;
    }

    tmpMap.delete();
}

From source file: org.apache.accumulo.tserver.log.LocalWALRecovery.java

License: Apache License

public void recoverLocalWriteAheadLogs(FileSystem fs) throws IOException {
    for (String directory : options.directories) {
        File localDirectory = new File(directory);
        if (!localDirectory.isAbsolute()) {
            localDirectory = new File(System.getenv("ACCUMULO_HOME"), directory);
        }

        if (!localDirectory.isDirectory()) {
            log.warn("Local walog dir " + localDirectory.getAbsolutePath()
                    + " does not exist or is not a directory.");
            continue;
        }

        if (options.destination == null) {
            // Defer loading the default value until now because it might require talking to zookeeper.
            options.destination = ServerConstants.getWalDirs()[0];
        }
        log.info("Copying WALs to " + options.destination);

        for (File file : localDirectory.listFiles()) {
            String name = file.getName();
            try {
                UUID.fromString(name);
            } catch (IllegalArgumentException ex) {
                log.info("Ignoring non-log file " + file.getAbsolutePath());
                continue;
            }

            LogFileKey key = new LogFileKey();
            LogFileValue value = new LogFileValue();

            log.info("Openning local log " + file.getAbsolutePath());

            Path localWal = new Path(file.toURI());
            FileSystem localFs = FileSystem.getLocal(fs.getConf());

            Reader reader = new SequenceFile.Reader(localFs, localWal, localFs.getConf());
            // Reader reader = new SequenceFile.Reader(localFs.getConf(), SequenceFile.Reader.file(localWal));
            Path tmp = new Path(options.destination + "/" + name + ".copy");
            FSDataOutputStream writer = fs.create(tmp);
            while (reader.next(key, value)) {
                try {
                    key.write(writer);
                    value.write(writer);
                } catch (EOFException ex) {
                    break;
                }
            }
            writer.close();
            reader.close();
            fs.rename(tmp, new Path(tmp.getParent(), name));

            if (options.deleteLocal) {
                if (file.delete()) {
                    log.info("Copied and deleted: " + name);
                } else {
                    log.info("Failed to delete: " + name + " (but it is safe for you to delete it manually).");
                }
            } else {
                log.info("Safe to delete: " + name);
            }
        }
    }
}