Example usage for org.apache.cassandra.io.sstable Descriptor fromFilename

List of usage examples for org.apache.cassandra.io.sstable Descriptor fromFilename

Introduction

In this page you can find the example usage for org.apache.cassandra.io.sstable Descriptor fromFilename.

Prototype

public static Descriptor fromFilename(File file) 

Source Link

Document

Parse an SSTable filename into a Descriptor.

Usage

From source file:com.cloudian.support.SSTableGarbageChecker.java

License:Apache License

/**
 * @param args/*from w w  w.j a v a 2 s .c  o m*/
 */
public static void main(String[] args) {

    String usage = "Usage: checksstablegarbage (-t|-g) SSTableFile1 (SSTableFile2 ... SSTableFileN)";
    String showOption = (args.length > 0 && args[0].startsWith("-")) ? args[0] : null;

    if (args.length == 0 || args.length == 1 && showOption != null) {

        System.err.println("At lest 1 SSTableFile needs to be provided");
        System.err.println(usage);
        System.exit(1);

    }

    if (showOption != null) {

        if (!showOption.equals(SHOW_OPTION_TOMBSTONED) && !showOption.equals(SHOW_OPTION_GCABLE)) {

            System.err.println(showOption + " is not a valid option");
            System.err.println(usage);
            System.exit(1);

        }

    }

    System.out.println("Loading schema ...");
    DatabaseDescriptor.loadSchemas();
    System.out.println("Loaded schema");
    if (Schema.instance.getCFMetaData(ksName, cfName) == null) {
        System.err.println(cfName + " in " + ksName + " does not exist");
        System.exit(1);
    }

    File[] ssTableFiles = (showOption != null) ? new File[args.length - 1] : new File[args.length];
    ssTableFilePaths = new String[ssTableFiles.length];
    for (int i = (showOption != null) ? 1 : 0; i < args.length; i++) {
        int j = (showOption != null) ? i - 1 : i;
        ssTableFiles[j] = new File(args[i]);
        ssTableFilePaths[j] = ssTableFiles[j].getAbsolutePath();
    }
    Arrays.sort(ssTableFilePaths);

    descriptors = new Descriptor[ssTableFiles.length];
    Collection<SSTableReader> ssTables = null;
    StringBuffer message = new StringBuffer();
    for (int i = ssTableFiles.length - 1; i >= 0; i--) {

        System.out.println("Opening " + ssTableFiles[i].getName() + "...");

        if (!ssTableFiles[i].exists()) {
            System.err.println(args[i] + " does not exist");
            System.exit(1);
        } else {

            descriptors[i] = Descriptor.fromFilename(ssTableFiles[i].getAbsolutePath());

            if (i == ssTableFiles.length - 1) {

                ksName = descriptors[i].ksname;
                cfName = descriptors[i].cfname;

                // get ColumnFamilyStore instance
                System.out.println("Opening keyspace " + ksName + " ...");
                Keyspace keyspace = Keyspace.open(ksName);
                System.out.println("Opened keyspace " + ksName + " ...");

                System.out.println("Getting column family " + cfName + " ...");
                cfStore = keyspace.getColumnFamilyStore(cfName);
                ssTables = cfStore.getSSTables();
                System.out.println("Got column family " + cfName + " ...");

                // get gc grace seconds
                gcGraceSeconds = cfStore.metadata.getGcGraceSeconds();
                gcBefore = (int) (System.currentTimeMillis() / 1000) - gcGraceSeconds;

                message.append("[KS, CF, gc_grace(gcBefore, now)] = [" + ksName + DELIMITER + cfName + DELIMITER
                        + gcGraceSeconds + "(" + gcBefore + ", " + System.currentTimeMillis() + ")]");

            } else {

                boolean theSameCf = cfName.equals(descriptors[i].cfname);
                if (!theSameCf) {

                    System.err.println("All SSTableFiles have to belong to the same column family");
                    System.err.println(args[i] + " does not a member of " + cfName);
                    System.exit(1);

                }

            }

            SSTableReader found = null;
            for (SSTableReader reader : ssTables) {

                if (reader.getFilename().equals(ssTableFiles[i].getAbsolutePath())) {

                    found = reader;
                    break;

                }

            }

            if (found != null) {

                ssTableReaders.add(found);

            } else {

                System.err.println("Can not find SSTableReader for " + ssTableFiles[i].getAbsolutePath());
                if (!ssTableFiles[i].exists()) {
                    System.err.println(ssTableFiles[i].getName() + " does not exist anymore.");
                }
                System.err.println("Loaded SSTable files are the followings:");
                for (SSTableReader reader : ssTables) {
                    System.err.println(reader.getFilename());
                }
                System.exit(1);

            }

        }

        System.out.println("Opened " + ssTableFiles[i].getName());

    }

    System.out.println(message.toString());

    try {

        SSTableGarbageChecker.checkCompacted(showOption);

    } catch (IOException e) {
        System.err.println("Check failed due to " + e.getMessage());
        e.printStackTrace();
        System.exit(1);
    }

    // Successful
    System.exit(0);

}

From source file:com.cloudian.support.SSTableKeyWriter.java

License:Apache License

/**
 * Creates a key writer for the given SSTable data file.
 *
 * @param ssTableFilePath path to the SSTable data component
 * @param verbose         when true, an SSTableReader is opened immediately
 * @throws IOException if the reader cannot be opened
 */
public SSTableKeyWriter(String ssTableFilePath, boolean verbose) throws IOException {

    this.verbose = verbose;
    this.ssTableFilePath = ssTableFilePath;

    // Schemas must be loaded before a Descriptor can be parsed from the filename.
    DatabaseDescriptor.loadSchemas();
    this.descriptor = Descriptor.fromFilename(this.ssTableFilePath);

    if (this.verbose) {
        this.openSSTableReader();
    }

}

From source file:com.jeffjirsa.cassandra.db.compaction.writers.CleaningTimeWindowCompactionWriter.java

License:Apache License

/**
 * Points the compaction at a new data directory by creating a fresh
 * SSTableWriter for that disk and handing it to the compaction-aware writer.
 */
@Override
protected void switchCompactionLocation(Directories.DataDirectory directory) {
    // Resolve the target SSTable path for the chosen disk up front.
    String sstablePath = cfs.getSSTablePath(getDirectories().getLocationForDisk(directory));
    // The writer's lifecycle is owned by sstableWriter, hence the suppression.
    @SuppressWarnings("resource")
    SSTableWriter writer = SSTableWriter.create(
            Descriptor.fromFilename(sstablePath),
            estimatedTotalKeys, minRepairedAt, cfs.metadata,
            new MetadataCollector(txn.originals(), cfs.metadata.comparator, 0),
            SerializationHeader.make(cfs.metadata, nonExpiredSSTables), cfs.indexManager.listIndexes(), txn);
    sstableWriter.switchWriter(writer);
}

From source file:com.knewton.mapreduce.SSTableRecordReader.java

License:Apache License

/**
 * Copies the minimum set of SSTable components (data, primary index and,
 * when present, compression info) from the remote filesystem to local disk
 * so the record reader can operate on them.
 *
 * @param remoteFS      filesystem holding the source SSTable components
 * @param localFS       filesystem the components are copied to
 * @param dataTablePath path of the SSTable data component on the remote filesystem
 * @param context       task context supplying the job configuration
 * @throws IOException if any copy fails
 */
@VisibleForTesting
void copyTablesToLocal(FileSystem remoteFS, FileSystem localFS, Path dataTablePath, TaskAttemptContext context)
        throws IOException {
    Configuration jobConf = context.getConfiguration();
    String remoteDataPathStr = dataTablePath.toUri().getPath();
    String localDataPathStr = dataTablePath.toUri().getHost() + File.separator
            + dataTablePath.toUri().getPath();
    // Make path relative due to EMR permissions
    if (localDataPathStr.startsWith("/")) {
        String taskAttemptId = jobConf.get("mapreduce.task.attempt.id");
        String clusterTempDir = jobConf.get("mapreduce.cluster.temp.dir");
        String workDir = clusterTempDir + File.separator + taskAttemptId;
        LOG.info("Appending {} to {}", workDir, localDataPathStr);
        localDataPathStr = workDir + localDataPathStr;
    }
    Path localDataPath = new Path(localDataPathStr);
    LOG.info("Copying hdfs file from {} to local disk at {}.", dataTablePath.toUri(),
            localDataPath.toUri());
    copyToLocalFile(remoteFS, localFS, dataTablePath, localDataPath);
    boolean compressed = jobConf.getBoolean(PropertyConstants.COMPRESSION_ENABLED.txt, false);
    if (compressed) {
        decompress(localDataPath, context);
    }
    components.add(Component.DATA);
    // Descriptors for the local copy (desc) and the remote original.
    desc = Descriptor.fromFilename(localDataPathStr);
    Descriptor remoteDesc = Descriptor.fromFilename(remoteDataPathStr);
    // Primary index is always required.
    String remoteIndexPathStr = remoteDesc.filenameFor(Component.PRIMARY_INDEX);
    components.add(Component.PRIMARY_INDEX);
    Path localIndexPath = new Path(desc.filenameFor(Component.PRIMARY_INDEX));
    LOG.info("Copying hdfs file from {} to local disk at {}.", remoteIndexPathStr, localIndexPath);
    copyToLocalFile(remoteFS, localFS, new Path(remoteIndexPathStr), localIndexPath);
    if (compressed) {
        decompress(localIndexPath, context);
    }
    // Compression info is optional; copy it only if the remote table has one.
    String remoteCompressionPathStr = remoteDesc.filenameFor(Component.COMPRESSION_INFO.name());
    Path remoteCompressionPath = new Path(remoteCompressionPathStr);
    if (remoteFS.exists(remoteCompressionPath)) {
        Path localCompressionPath = new Path(desc.filenameFor(Component.COMPRESSION_INFO.name()));
        LOG.info("Copying hdfs file from {} to local disk at {}.", remoteCompressionPath.toUri(),
                localCompressionPath);
        copyToLocalFile(remoteFS, localFS, remoteCompressionPath, localCompressionPath);
        if (compressed) {
            decompress(localCompressionPath, context);
        }
        components.add(Component.COMPRESSION_INFO);
    }
}

From source file:com.knewton.mapreduce.SSTableRecordReaderTest.java

License:Apache License

/**
 * Prepares a fresh job configuration, an input split over the test table
 * and stubbed record readers before each test.
 */
@Before
public void setup() throws IOException {
    // Fresh job + configuration carrying the task-attempt properties the reader expects.
    job = Job.getInstance();
    conf = job.getConfiguration();
    attemptId = new TaskAttemptID();
    conf.setInt("mapreduce.task.attempt.id", attemptId.getId());
    conf.set("mapreduce.cluster.temp.dir", "tempdir");

    Path tablePath = new Path(TABLE_PATH_STR);
    inputSplit = new FileSplit(tablePath, 0, 1, null);
    Descriptor descriptor = Descriptor.fromFilename(TABLE_PATH_STR);

    // Stub the column record reader: fixed descriptor, no real copies, canned reader.
    doReturn(descriptor).when(ssTableColumnRecordReader).getDescriptor();
    doNothing().when(ssTableColumnRecordReader).copyTablesToLocal(any(FileSystem.class), any(FileSystem.class),
            any(Path.class), any(TaskAttemptContext.class));
    doReturn(ssTableReader).when(ssTableColumnRecordReader).openSSTableReader(any(IPartitioner.class),
            any(CFMetaData.class));

    // Stub the row record reader identically.
    doReturn(descriptor).when(ssTableRowRecordReader).getDescriptor();
    doNothing().when(ssTableRowRecordReader).copyTablesToLocal(any(FileSystem.class), any(FileSystem.class),
            any(Path.class), any(TaskAttemptContext.class));
    doReturn(ssTableReader).when(ssTableRowRecordReader).openSSTableReader(any(IPartitioner.class),
            any(CFMetaData.class));

    when(ssTableReader.getScanner()).thenReturn(tableScanner);
}

From source file:com.netflix.aegisthus.input.readers.CommitLogRecordReader.java

License:Apache License

/**
 * Initializes the reader for a commit log split: records the split bounds,
 * opens the underlying file and positions a CommitLogScanner at the start.
 * Any failure — including a missing commitlog.cfid — surfaces as an IOError.
 */
@Override
public void initialize(InputSplit inputSplit, TaskAttemptContext ctx) throws IOException, InterruptedException {
    AegSplit split = (AegSplit) inputSplit;

    start = split.getStart();
    end = split.getEnd();
    final Path file = split.getPath();

    try {
        cfId = ctx.getConfiguration().getInt("commitlog.cfid", -1000);
        if (cfId == -1000) {
            // Thrown inside the try so it is wrapped in IOError like every
            // other initialization failure.
            throw new IOException("commitlog.cfid must be set");
        }
        // Open the commit log file, buffer it and wrap it as a data stream.
        FileSystem fileSystem = file.getFileSystem(ctx.getConfiguration());
        FSDataInputStream rawStream = fileSystem.open(split.getPath());
        DataInputStream dataStream = new DataInputStream(new BufferedInputStream(rawStream));
        // The SSTable format version is parsed from the commit log file name.
        scanner = new CommitLogScanner(dataStream, split.getConvertors(),
                Descriptor.fromFilename(split.getPath().getName()).version);
        this.pos = start;
    } catch (IOException e) {
        throw new IOError(e);
    }
}

From source file:com.netflix.aegisthus.input.readers.SSTableRecordReader.java

License:Apache License

/**
 * Initializes the reader for an SSTable split: resolves the split bounds
 * (compressed splits use the uncompressed data length), then builds an
 * SSTableScanner positioned at the start of the split.
 */
@SuppressWarnings("rawtypes")
@Override
public void initialize(InputSplit inputSplit, TaskAttemptContext ctx) throws IOException, InterruptedException {
    AegSplit split = (AegSplit) inputSplit;

    start = split.getStart();
    //TODO: This has a side effect of setting compressionmetadata. remove this.
    InputStream inputStream = split.getInput(ctx.getConfiguration());
    end = split.isCompressed() ? split.getCompressionMetadata().getDataLength() : split.getEnd();
    outputFile = ctx.getConfiguration().getBoolean("aegsithus.debug.file", false);
    filename = split.getPath().toUri().toString();

    LOG.info(String.format("File: %s", split.getPath().toUri().getPath()));
    LOG.info("Start: " + start);
    LOG.info("End: " + end);
    if (ctx instanceof TaskInputOutputContext) {
        context = (TaskInputOutputContext) ctx;
    }

    try {
        // The SSTable format version is parsed from the file name.
        scanner = new SSTableScanner(new DataInputStream(inputStream), split.getConvertors(), end,
                Descriptor.fromFilename(filename).version);
        if (ctx.getConfiguration().get("aegisthus.maxcolsize") != null) {
            scanner.setMaxColSize(ctx.getConfiguration().getLong("aegisthus.maxcolsize", -1L));
            LOG.info(String.format("aegisthus.maxcolsize - %d",
                    ctx.getConfiguration().getLong("aegisthus.maxcolsize", -1L)));
        }
        scanner.skipUnsafe(start);
        this.pos = start;
    } catch (IOException e) {
        throw new IOError(e);
    }
}

From source file:com.netflix.aegisthus.io.sstable.IndexScanner.java

License:Apache License

/**
 * Opens a scanner over the given SSTable index file.
 *
 * @param filename path to the index component of an SSTable; its name
 *                 encodes the format version used for parsing
 * @throws IOError if the file cannot be opened
 */
public IndexScanner(String filename) {
    // BUGFIX: parse the version before opening the stream — if the filename
    // is not a valid SSTable name, fromFilename throws and the previously
    // opened FileInputStream would leak.
    this.version = Descriptor.fromFilename(filename).version;
    try {
        this.input = new DataInputStream(new BufferedInputStream(new FileInputStream(filename), 65536 * 10));
    } catch (IOException e) {
        throw new IOError(e);
    }
}

From source file:com.netflix.aegisthus.io.sstable.OffsetScanner.java

License:Apache License

/**
 * Creates an offset scanner over an already-open input.
 *
 * @param input    stream positioned at the start of the index data
 * @param filename SSTable file name; its encoded format version drives parsing
 */
public OffsetScanner(DataInput input, String filename) {
    this.version = Descriptor.fromFilename(filename).version;
    this.input = input;
}

From source file:com.netflix.aegisthus.io.sstable.OffsetScanner.java

License:Apache License

/**
 * Opens a scanner over the given SSTable file.
 *
 * @param filename path to the SSTable component; its name encodes the
 *                 format version used for parsing
 * @throws IOError if the file cannot be opened
 */
public OffsetScanner(String filename) {
    // BUGFIX: parse the version before opening the stream — if the filename
    // is not a valid SSTable name, fromFilename throws and the previously
    // opened FileInputStream would leak.
    this.version = Descriptor.fromFilename(filename).version;
    try {
        this.input = new DataInputStream(new BufferedInputStream(new FileInputStream(filename), 65536 * 10));
    } catch (IOException e) {
        throw new IOError(e);
    }
}