List of usage examples for org.apache.cassandra.db ConsistencyLevel QUORUM
ConsistencyLevel QUORUM
To view the source code for org.apache.cassandra.db ConsistencyLevel QUORUM, click the Source Link below.
From source file:com.datastax.brisk.BriskServer.java
License:Apache License
private LocalOrRemoteBlock getRemoteSubBlock(ByteBuffer blockId, ByteBuffer sblockId, int offset, ColumnParent subBlockDataPath)//from w w w . ja v a2 s . co m throws TimedOutException, UnavailableException, InvalidRequestException, NotFoundException { // The column name is the SubBlock id (UUID) ReadCommand rc = new SliceByNamesReadCommand(cfsKeyspace, blockId, subBlockDataPath, Arrays.asList(sblockId)); try { // CL=ONE as there are NOT multiple versions of the blocks. List<Row> rows = StorageProxy.read(Arrays.asList(rc), ConsistencyLevel.ONE); IColumn col = null; try { col = validateAndGetColumn(rows, sblockId); } catch (NotFoundException e) { // This is a best effort to get the value. Sometimes due to the size of // the sublocks, the normal replication may time out leaving a replicate without // the piece of data. Hence we re try with higher CL. rows = StorageProxy.read(Arrays.asList(rc), ConsistencyLevel.QUORUM); } col = validateAndGetColumn(rows, sblockId); ByteBuffer value = col.value(); if (value.remaining() < offset) throw new InvalidRequestException("Invalid offset for block of size: " + value.remaining()); LocalOrRemoteBlock block = new LocalOrRemoteBlock(); if (offset > 0) { ByteBuffer offsetBlock = value.duplicate(); offsetBlock.position(offsetBlock.position() + offset); block.setRemote_block(offsetBlock); } else { block.setRemote_block(value); } return block; } catch (IOException e) { throw new RuntimeException(e); } catch (TimeoutException e) { throw new TimedOutException(); } }
From source file:org.elassandra.cluster.InternalCassandraClusterService.java
License:Apache License
public static ConsistencyLevel consistencyLevelFromString(String value) { switch (value.toUpperCase(Locale.ROOT)) { case "ANY": return ConsistencyLevel.ANY; case "ONE": return ConsistencyLevel.ONE; case "TWO": return ConsistencyLevel.TWO; case "THREE": return ConsistencyLevel.THREE; case "QUORUM": return ConsistencyLevel.QUORUM; case "ALL": return ConsistencyLevel.ALL; case "LOCAL_QUORUM": return ConsistencyLevel.LOCAL_QUORUM; case "EACH_QUORUM": return ConsistencyLevel.EACH_QUORUM; case "SERIAL": return ConsistencyLevel.SERIAL; case "LOCAL_SERIAL": return ConsistencyLevel.LOCAL_SERIAL; case "LOCAL_ONE": return ConsistencyLevel.LOCAL_ONE; default:// www . j av a 2s .c om throw new IllegalArgumentException("No write consistency match [" + value + "]"); } }
From source file:org.elasticsearch.cluster.ClusterService.java
License:Apache License
/**
 * Resolves a consistency-level name (case-insensitive) to the corresponding Cassandra
 * {@link ConsistencyLevel} constant.
 *
 * <p>Fix: uppercasing now uses {@code Locale.ROOT} instead of the default locale, so the
 * result cannot vary with the JVM locale (e.g. the Turkish dotless-i problem, where
 * {@code "quorum".toUpperCase()} would not round-trip as expected). This also removes the
 * need for the previous {@code @SuppressForbidden} on the locale-sensitive overload.
 *
 * @param value the consistency level name, e.g. {@code "quorum"} or {@code "LOCAL_ONE"}
 * @return the matching {@code ConsistencyLevel}
 * @throws IllegalArgumentException if {@code value} matches no known consistency level
 */
public static ConsistencyLevel consistencyLevelFromString(String value) {
    // Fully qualified Locale.ROOT keeps this change self-contained (no new import needed).
    switch (value.toUpperCase(java.util.Locale.ROOT)) {
    case "ANY":
        return ConsistencyLevel.ANY;
    case "ONE":
        return ConsistencyLevel.ONE;
    case "TWO":
        return ConsistencyLevel.TWO;
    case "THREE":
        return ConsistencyLevel.THREE;
    case "QUORUM":
        return ConsistencyLevel.QUORUM;
    case "ALL":
        return ConsistencyLevel.ALL;
    case "LOCAL_QUORUM":
        return ConsistencyLevel.LOCAL_QUORUM;
    case "EACH_QUORUM":
        return ConsistencyLevel.EACH_QUORUM;
    case "SERIAL":
        return ConsistencyLevel.SERIAL;
    case "LOCAL_SERIAL":
        return ConsistencyLevel.LOCAL_SERIAL;
    case "LOCAL_ONE":
        return ConsistencyLevel.LOCAL_ONE;
    default:
        throw new IllegalArgumentException("No write consistency match [" + value + "]");
    }
}
From source file:org.janusgraph.diskstorage.cassandra.embedded.CassandraEmbeddedKeyColumnValueStore.java
License:Apache License
/**
 * Create a RangeSliceCommand and run it against the StorageProxy.
 * <p>
 * To match the behavior of the standard Cassandra thrift API endpoint, the
 * {@code nowMillis} argument should be the number of milliseconds since the
 * UNIX Epoch (e.g. System.currentTimeMillis() or equivalent obtained
 * through a {@link TimestampProvider}). This is per
 * {@link org.apache.cassandra.thrift.CassandraServer#get_range_slices(ColumnParent, SlicePredicate, KeyRange, ConsistencyLevel)},
 * which passes the server's System.currentTimeMillis() to the
 * {@code RangeSliceCommand} constructor.
 *
 * @param start      inclusive lower bound token of the key range
 * @param end        exclusive-ish upper bound token (its min key bound is used)
 * @param sliceQuery optional column slice; when null, a minimal probe slice is used
 * @param pageSize   maximum number of rows to fetch
 * @param nowMillis  query timestamp in milliseconds since the UNIX Epoch
 * @return the rows returned by the QUORUM range-slice read
 * @throws BackendException wrapping any failure from the embedded read path
 */
private List<Row> getKeySlice(Token start, Token end, @Nullable SliceQuery sliceQuery, int pageSize,
        long nowMillis) throws BackendException {
    IPartitioner partitioner = StorageService.getPartitioner();

    SliceRange columnSlice = new SliceRange();
    if (sliceQuery == null) {
        // No column filter supplied: fetch up to 5 columns per row. Some columns are
        // still needed (rather than none) so that "range ghosts" can be detected below.
        columnSlice.setStart(ArrayUtils.EMPTY_BYTE_ARRAY).setFinish(ArrayUtils.EMPTY_BYTE_ARRAY).setCount(5);
    } else {
        columnSlice.setStart(sliceQuery.getSliceStart().asByteBuffer())
                .setFinish(sliceQuery.getSliceEnd().asByteBuffer())
                .setCount(sliceQuery.hasLimit() ? sliceQuery.getLimit() : Integer.MAX_VALUE);
    }
    /* Note: we need to fetch columns for each row as well to remove "range ghosts" */
    SlicePredicate predicate = new SlicePredicate().setSlice_range(columnSlice);

    // Convert the token range endpoints into row positions for the Bounds below.
    RowPosition startPosition = start.minKeyBound(partitioner);
    RowPosition endPosition = end.minKeyBound(partitioner);

    List<Row> rows;

    try {
        CFMetaData cfm = Schema.instance.getCFMetaData(keyspace, columnFamily);
        IDiskAtomFilter filter = ThriftValidation.asIFilter(predicate, cfm, null);

        RangeSliceCommand cmd = new RangeSliceCommand(keyspace, columnFamily, nowMillis, filter,
                new Bounds<RowPosition>(startPosition, endPosition), pageSize);

        // QUORUM read through the embedded StorageProxy (no thrift transport involved).
        rows = StorageProxy.getRangeSlice(cmd, ConsistencyLevel.QUORUM);
    } catch (Exception e) {
        throw new PermanentBackendException(e);
    }

    return rows;
}
From source file:org.janusgraph.diskstorage.cassandra.embedded.CassandraEmbeddedStoreManager.java
License:Apache License
private void retryDummyRead(String ks, String cf) throws PermanentBackendException { final long limit = System.currentTimeMillis() + (60L * 1000L); while (System.currentTimeMillis() < limit) { try {//from ww w. j a v a2s . c o m SortedSet<CellName> names = new TreeSet<>(new Comparator<CellName>() { // This is a singleton set. We need to define a comparator because SimpleDenseCellName is not // comparable, but it doesn't have to be a useful comparator @Override public int compare(CellName o1, CellName o2) { return 0; } }); names.add(CellNames.simpleDense(ByteBufferUtil.zeroByteBuffer(1))); NamesQueryFilter nqf = new NamesQueryFilter(names); SliceByNamesReadCommand cmd = new SliceByNamesReadCommand(ks, ByteBufferUtil.zeroByteBuffer(1), cf, 1L, nqf); StorageProxy.read(ImmutableList.<ReadCommand>of(cmd), ConsistencyLevel.QUORUM); log.info("Read on CF {} in KS {} succeeded", cf, ks); return; } catch (Throwable t) { log.warn("Failed to read CF {} in KS {} following creation", cf, ks, t); } try { Thread.sleep(1000L); } catch (InterruptedException e) { throw new PermanentBackendException(e); } } throw new PermanentBackendException( "Timed out while attempting to read CF " + cf + " in KS " + ks + " following creation"); }
From source file:protocol.bigdata.cassandra.ClientOnlyExample.java
License:Apache License
private static void testWriting() throws Exception { // do some writing. for (int i = 0; i < 100; i++) { QueryProcessor.process(//from w w w .j a v a 2s . c om String.format("INSERT INTO %s.%s (id, name, value) VALUES ( 'key%d', 'colb', 'value%d')", KEYSPACE, COLUMN_FAMILY, i, i), ConsistencyLevel.QUORUM); System.out.println("wrote key" + i); } System.out.println("Done writing."); }
From source file:protocol.bigdata.cassandra.ClientOnlyExample.java
License:Apache License
private static void testReading() throws Exception { // do some queries. for (int i = 0; i < 100; i++) { String query = String.format("SELECT id, name, value FROM %s.%s WHERE id = 'key%d'", KEYSPACE, COLUMN_FAMILY, i);//w w w. ja v a 2 s . c o m UntypedResultSet.Row row = QueryProcessor.process(query, ConsistencyLevel.QUORUM).one(); System.out.println(String.format("ID: %s, Name: %s, Value: %s", row.getString("id"), row.getString("name"), row.getString("value"))); } }