andromache.hadoop.CassandraInputFormat.java Source code

Introduction

Here is the source code for andromache.hadoop.CassandraInputFormat.java. The class implements a Hadoop InputFormat that maps a Cassandra keyspace's token ring onto input splits, so MapReduce tasks can read rows of the configured column families directly from the cluster.

Source

/*
 * Copyright 2013 Illarion Kovalchuk
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package andromache.hadoop;

import com.google.common.collect.ImmutableList;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import andromache.config.CassandraConfigHelper;
import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.InvalidRequestException;
import org.apache.cassandra.thrift.KeyRange;
import org.apache.cassandra.thrift.TokenRange;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.*;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CassandraInputFormat extends InputFormat<ByteBuffer, SortedMap<ByteBuffer, IColumn>> {
    private static final Logger logger = LoggerFactory.getLogger(CassandraInputFormat.class);

    private String keyspace;
    private Collection<String> cfNames;
    private IPartitioner partitioner;

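    /**
     * Fails fast when the job configuration is missing any setting this
     * input format requires: keyspace, column families, slice predicate,
     * initial contact address, or partitioner class.
     */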
    private void validateConfiguration(Configuration conf) {
        if (CassandraConfigHelper.getInputKeyspace(conf) == null
                || CassandraConfigHelper.getInputColumnFamilies(conf) == null) {
            throw new UnsupportedOperationException(
                    "you must set the keyspace and columnfamily with setColumnFamily()");
        }
        if (CassandraConfigHelper.getInputSlicePredicate(conf) == null) {
            throw new UnsupportedOperationException("you must set the predicate with setPredicate");
        }
        if (CassandraConfigHelper.getInputInitialAddress(conf) == null) {
            throw new UnsupportedOperationException("You must set the initial output address to a Cassandra node");
        }
        if (CassandraConfigHelper.getInputPartitioner(conf) == null) {
            throw new UnsupportedOperationException("You must set the Cassandra partitioner class");
        }
    }

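    /**
     * Describes the ring for the input keyspace, then asks live replica
     * owners, in parallel, to break each token range into splits of the
     * configured size; the collected splits are shuffled before returning.
     */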
    @Override
    public List<InputSplit> getSplits(JobContext context) throws IOException {
        Configuration conf = context.getConfiguration();
        validateConfiguration(conf);

        // canonical token ranges and the nodes holding their replicas
        List<TokenRange> masterRangeNodes = getRangeMap(conf);

        keyspace = CassandraConfigHelper.getInputKeyspace(conf);
        cfNames = CassandraConfigHelper.getInputColumnFamilies(conf);

        // TODO: [IS] make sure this partitioner matches what is set on each participating keyspace
        partitioner = CassandraConfigHelper.getInputPartitioner(conf);
        logger.debug("partitioner is {}", partitioner);

        // canonical ranges, split into pieces; fetch the splits in parallel

        ExecutorService executor = Executors.newCachedThreadPool();
        List<InputSplit> splits = new ArrayList<InputSplit>();

        try {
            List<Future<List<CassandraSplit>>> splitfutures = new ArrayList<Future<List<CassandraSplit>>>();
            KeyRange jobKeyRange = CassandraConfigHelper.getInputKeyRange(conf);
            Range<Token> jobRange = null;
            if (jobKeyRange != null && jobKeyRange.start_token != null) {
                assert partitioner.preservesOrder()
                        : "ConfigHelper.setInputKeyRange(..) can only be used with an order-preserving partitioner";
                assert jobKeyRange.start_key == null : "only start_token supported";
                assert jobKeyRange.end_key == null : "only end_token supported";
                jobRange = new Range<Token>(partitioner.getTokenFactory().fromString(jobKeyRange.start_token),
                        partitioner.getTokenFactory().fromString(jobKeyRange.end_token), partitioner);
            }

            for (TokenRange range : masterRangeNodes) {
                if (jobRange == null) {
                    // for each range, pick a live owner and ask it to compute bite-sized splits

                    splitfutures.add(executor.submit(new SplitCallable(range, conf)));
                } else {
                    Range<Token> dhtRange = new Range<Token>(
                            partitioner.getTokenFactory().fromString(range.start_token),
                            partitioner.getTokenFactory().fromString(range.end_token), partitioner);

                    if (dhtRange.intersects(jobRange)) {
                        for (Range<Token> intersection : dhtRange.intersectionWith(jobRange)) {
                            range.start_token = partitioner.getTokenFactory().toString(intersection.left);
                            range.end_token = partitioner.getTokenFactory().toString(intersection.right);
                            // for each range, pick a live owner and ask it to compute bite-sized splits
                            splitfutures.add(executor.submit(new SplitCallable(range, conf)));
                        }
                    }
                }
            }

            // wait until we have all the results back
            for (Future<List<CassandraSplit>> futureInputSplits : splitfutures) {
                try {
                    splits.addAll(futureInputSplits.get());
                } catch (Exception e) {
                    throw new IOException("Could not get input splits", e);
                }
            }
        } finally {
            executor.shutdownNow();
        }

        assert splits.size() > 0;
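        // randomize the split order so concurrently running jobs don't all hit the same nodes in ring order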
        Collections.shuffle(splits, new Random(System.nanoTime()));
        return splits;
    }

    /**
     * Gets a token range and splits it up according to the suggested
     * size into input splits that Hadoop can use.
     */
    class SplitCallable implements Callable<List<CassandraSplit>> {

        private final TokenRange range;
        private final Configuration conf;

        public SplitCallable(TokenRange tr, Configuration conf) {
            this.range = tr;
            this.conf = conf;
        }

        public List<CassandraSplit> call() throws Exception {
            List<CassandraSplit> result = new ArrayList<CassandraSplit>();
            for (String cfName : cfNames) {
                result.addAll(callForSingleCF(cfName));
            }
            return result;
        }

        private List<CassandraSplit> callForSingleCF(String cfName) throws IOException {
            ArrayList<CassandraSplit> splits = new ArrayList<CassandraSplit>();

            List<String> tokens = getSubSplits(keyspace, cfName, range, conf);

            assert range.rpc_endpoints.size() == range.endpoints.size()
                    : "rpc_endpoints size must match endpoints size";
            // turn the sub-ranges into InputSplits
            String[] endpoints = range.endpoints.toArray(new String[range.endpoints.size()]);
            // hadoop needs hostname, not ip
            int endpointIndex = 0;
            for (String endpoint : range.rpc_endpoints) {
                String endpoint_address = endpoint;
                if (endpoint_address == null || endpoint_address.equals("0.0.0.0")) {
                    endpoint_address = range.endpoints.get(endpointIndex);
                }
                endpoints[endpointIndex++] = InetAddress.getByName(endpoint_address).getHostName();
            }

            Token.TokenFactory factory = partitioner.getTokenFactory();
            for (int i = 1; i < tokens.size(); i++) {
                Token left = factory.fromString(tokens.get(i - 1));
                Token right = factory.fromString(tokens.get(i));
                Range<Token> dhtRange = new Range<Token>(left, right, partitioner);
                List<Range<Token>> ranges = dhtRange.isWrapAround() ? dhtRange.unwrap() : ImmutableList.of(dhtRange);
                for (Range<Token> subrange : ranges) {
                    CassandraSplit split = new CassandraSplit(factory.toString(subrange.left),
                            factory.toString(subrange.right), endpoints, cfName);
                    logger.debug("  adding {}", split);
                    splits.add(split);
                }
            }
            return splits;
        }
    }

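    /**
     * Asks each replica of the given token range, in turn, to describe
     * sub-splits of roughly the configured split size. Returns the first
     * successful answer, or throws if no endpoint is reachable.
     */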
    private List<String> getSubSplits(String keyspace, String cfName, TokenRange range, Configuration conf)
            throws IOException {
        int splitsize = CassandraConfigHelper.getInputSplitSize(conf);
        for (int i = 0; i < range.rpc_endpoints.size(); i++) {
            String host = range.rpc_endpoints.get(i);

            if (host == null || host.equals("0.0.0.0")) {
                host = range.endpoints.get(i);
            }

            try {
                Cassandra.Client client = CassandraConfigHelper.createConnection(conf, host,
                        CassandraConfigHelper.getInputRpcPort(conf));
                client.set_keyspace(keyspace);
                List<String> splits = client.describe_splits(cfName, range.start_token, range.end_token, splitsize);
                logger.info("{} returned splits for {}.{}({}): {}",
                        new Object[] { host, keyspace, cfName, range, splits });
                return splits;
            } catch (IOException e) {
                logger.debug("failed connect to endpoint " + host, e);
            } catch (TException e) {
                throw new RuntimeException(e);
            } catch (InvalidRequestException e) {
                throw new RuntimeException(e);
            }
        }
        throw new IOException("failed connecting to all endpoints " + StringUtils.join(range.endpoints, ","));
    }

    // TODO: [IS] what about refactoring CassandraInputFormat and CassandraMultiSourcesInputFormat
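    /**
     * Describes the ring for the input keyspace: the canonical token ranges
     * and the endpoints holding replicas of each range.
     */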
    private List<TokenRange> getRangeMap(Configuration conf) throws IOException {
        Cassandra.Client client = CassandraConfigHelper.getClientFromInputAddressList(conf);

        List<TokenRange> map;
        try {
            map = client.describe_ring(CassandraConfigHelper.getInputKeyspace(conf));
        } catch (TException e) {
            throw new RuntimeException(e);
        } catch (InvalidRequestException e) {
            throw new RuntimeException(e);
        }
        logger.info("Ring was described: {}", map);
        return map;
    }

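    /**
     * Returns a reader that yields each row of a {@link CassandraSplit} as a
     * (row key, sorted column map) pair.
     */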
    @Override
    public RecordReader<ByteBuffer, SortedMap<ByteBuffer, IColumn>> createRecordReader(InputSplit inputSplit,
            TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
        return new CassandraRecordReader();
    }

}
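
Example usage

For context, below is a minimal sketch of how a MapReduce job might wire up this input format. The CassandraConfigHelper setter names (setInputInitialAddress, setInputRpcPort, setInputPartitioner, setInputKeyspace, setInputColumnFamilies, setInputSlicePredicate) are assumptions inferred from the getters called in validateConfiguration() above; they do not appear in this file, so treat the sketch as illustrative rather than as the project's documented API.

import java.nio.ByteBuffer;
import java.util.Arrays;

import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

import andromache.config.CassandraConfigHelper;
import andromache.hadoop.CassandraInputFormat;

public class ExampleJobSetup {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // Assumed setters, mirroring the getters used by validateConfiguration():
        CassandraConfigHelper.setInputInitialAddress(conf, "127.0.0.1");
        CassandraConfigHelper.setInputRpcPort(conf, "9160");
        CassandraConfigHelper.setInputPartitioner(conf, "org.apache.cassandra.dht.RandomPartitioner");
        CassandraConfigHelper.setInputKeyspace(conf, "my_keyspace");
        CassandraConfigHelper.setInputColumnFamilies(conf, Arrays.asList("cf_one", "cf_two"));

        // Fetch every column of every row (the standard Thrift "all columns" slice).
        SlicePredicate predicate = new SlicePredicate().setSlice_range(
                new SliceRange(ByteBuffer.wrap(new byte[0]), ByteBuffer.wrap(new byte[0]),
                        false, Integer.MAX_VALUE));
        CassandraConfigHelper.setInputSlicePredicate(conf, predicate);

        Job job = Job.getInstance(conf, "cassandra-input-example");
        job.setInputFormatClass(CassandraInputFormat.class);
        // ... configure the mapper, reducer, and output format as usual, then:
        // job.waitForCompletion(true);
    }
}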