List of usage examples for org.bouncycastle.crypto.paddings PKCS7Padding padCount
public int padCount(byte[] in) throws InvalidCipherTextException
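Before the examples, a minimal self-contained sketch of the call (the class name PadCountDemo and the sample message are illustrative, not taken from the projects below): padCount reads the trailing pad bytes of a PKCS#7-padded block, validates them, and returns how many to strip, throwing InvalidCipherTextException when the padding is malformed.

import org.bouncycastle.crypto.InvalidCipherTextException;
import org.bouncycastle.crypto.paddings.PKCS7Padding;

public class PadCountDemo {
  public static void main(String[] args) throws InvalidCipherTextException {
    PKCS7Padding padding = new PKCS7Padding();

    // Pad a 5-byte message into a 16-byte block: addPadding() fills the
    // tail of the array (11 bytes here) with the pad count value 0x0b.
    byte[] block = new byte[16];
    System.arraycopy("hello".getBytes(), 0, block, 0, 5);
    padding.addPadding(block, 5);

    // padCount() reads the trailing pad bytes back and validates them.
    int count = padding.padCount(block); // 11
    byte[] unpadded = java.util.Arrays.copyOf(block, block.length - count);
    System.out.println(new String(unpadded)); // prints "hello"
  }
}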
From source file:com.geoxp.oss.CryptoHelper.java
License:Apache License
/**
 * Remove PKCS7 padding from padded data
 *
 * @param padded The padded data to 'unpad'
 * @return The original unpadded data
 * @throws InvalidCipherTextException if data is not correctly padded
 */
public static byte[] unpadPKCS7(byte[] padded) throws InvalidCipherTextException {
  PKCS7Padding padding = new PKCS7Padding();

  //
  // Determine length of padding
  //

  int pad = padding.padCount(padded);

  //
  // Allocate array for unpadded data
  //

  byte[] unpadded = new byte[padded.length - pad];

  //
  // Copy data without the padding
  //

  System.arraycopy(padded, 0, unpadded, 0, padded.length - pad);

  return unpadded;
}
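A quick usage sketch for the helper above, with a made-up input: an 8-byte block carrying a 2-byte payload ends with six pad bytes of value 0x06, so unpadPKCS7 returns just the first two bytes.

byte[] padded = { 'h', 'i', 6, 6, 6, 6, 6, 6 };
byte[] plain = CryptoHelper.unpadPKCS7(padded); // { 'h', 'i' }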
From source file:io.warp10.continuum.gts.CustomBufferBasedGTSDecoder.java
License:Apache License
/**
 * Attempt to read the next measurement and associated metadata (timestamp, location, elevation)
 * @return true if a measurement was successfully read, false if none were left in the buffer.
 */
public boolean next() {
  //
  // Update position prior to reading the next value, etc so we can
  //

  this.position = this.buffer.position();

  if (!buffer.hasRemaining()) {
    return false;
  }

  this.nextCalled = true;

  //
  // Read timestamp/type flag
  //

  byte tsTypeFlag = buffer.get();

  //
  // Check if we encountered encrypted data
  //

  if (GTSEncoder.FLAGS_ENCRYPTED == (tsTypeFlag & GTSEncoder.FLAGS_MASK_ENCRYPTED)) {
    //
    // Extract encrypted length
    //

    int enclen = (int) Varint.decodeUnsignedLong(buffer);

    //
    // If there is no decryption key, simply skip the encrypted data
    // and call next recursively.
    //

    if (null == wrappingKey) {
      buffer.position(buffer.position() + enclen);

      // WARNING(hbs): if there are many encrypted chunks this may lead to a stack overflow
      return next();
    }

    byte[] encrypted = new byte[enclen];
    buffer.get(encrypted);

    //
    // Decrypt the encrypted data
    //

    AESWrapEngine engine = new AESWrapEngine();
    CipherParameters params = new KeyParameter(this.wrappingKey);
    engine.init(false, params);

    try {
      byte[] decrypted = engine.unwrap(encrypted, 0, encrypted.length);

      //
      // Unpad the decrypted data
      //

      PKCS7Padding padding = new PKCS7Padding();
      int padcount = padding.padCount(decrypted);

      //
      // Replace the current buffer with a new one containing the
      // decrypted data followed by any remaining data in the original
      // buffer.
      //

      this.buffer.insert(decrypted, 0, decrypted.length - padcount);
    } catch (InvalidCipherTextException icte) {
      // FIXME(hbs): log this somewhere...
      //
      // Skip the encrypted chunk we failed to decrypt
      //
    }

    //
    // Call next recursively
    //

    // WARNING(hbs): we may hit StackOverflow in some cases
    return next();
  }

  //
  // Read location/elevation flag if needed
  //

  byte locElevFlag = 0x0;

  if (GTSEncoder.FLAGS_CONTINUATION == (tsTypeFlag & GTSEncoder.FLAGS_CONTINUATION)) {
    if (!buffer.hasRemaining()) {
      return false;
    }

    locElevFlag = buffer.get();
  }

  //
  // Read timestamp
  //

  switch (tsTypeFlag & GTSEncoder.FLAGS_MASK_TIMESTAMP) {
    case GTSEncoder.FLAGS_TIMESTAMP_RAW_ABSOLUTE: {
      ByteOrder order = buffer.order();
      buffer.order(ByteOrder.BIG_ENDIAN);
      previousLastTimestamp = lastTimestamp;
      lastTimestamp = buffer.getLong();
      buffer.order(order);
    }
      break;
    //case GTSEncoder.FLAGS_TIMESTAMP_ZIGZAG_ABSOLUTE:
    //  previousLastTimestamp = lastTimestamp;
    //  lastTimestamp = Varint.decodeSignedLong(buffer);
    //  break;
    case GTSEncoder.FLAGS_TIMESTAMP_EQUALS_BASE:
      previousLastTimestamp = lastTimestamp;
      lastTimestamp = baseTimestamp;
      break;
    case GTSEncoder.FLAGS_TIMESTAMP_ZIGZAG_DELTA_BASE: {
      long delta = Varint.decodeSignedLong(buffer);
      previousLastTimestamp = lastTimestamp;
      lastTimestamp = baseTimestamp + delta;
    }
      break;
    case GTSEncoder.FLAGS_TIMESTAMP_ZIGZAG_DELTA_PREVIOUS: {
      long delta = Varint.decodeSignedLong(buffer);
      previousLastTimestamp = lastTimestamp;
      lastTimestamp = lastTimestamp + delta;
    }
      break;
    default:
      throw new RuntimeException("Invalid timestamp format.");
  }

  //
  // Read location/elevation
  //

  if (GTSEncoder.FLAGS_LOCATION == (locElevFlag & GTSEncoder.FLAGS_LOCATION)) {
    if (GTSEncoder.FLAGS_LOCATION_IDENTICAL != (locElevFlag & GTSEncoder.FLAGS_LOCATION_IDENTICAL)) {
      if (GTSEncoder.FLAGS_LOCATION_GEOXPPOINT_ZIGZAG_DELTA == (locElevFlag & GTSEncoder.FLAGS_LOCATION_GEOXPPOINT_ZIGZAG_DELTA)) {
        long delta = Varint.decodeSignedLong(buffer);
        previousLastGeoXPPoint = lastGeoXPPoint;
        lastGeoXPPoint = lastGeoXPPoint + delta;
      } else {
        ByteOrder order = buffer.order();
        buffer.order(ByteOrder.BIG_ENDIAN);
        previousLastGeoXPPoint = lastGeoXPPoint;
        lastGeoXPPoint = buffer.getLong();
        buffer.order(order);
      }
    }
  } else {
    previousLastGeoXPPoint = lastGeoXPPoint;
    lastGeoXPPoint = GeoTimeSerie.NO_LOCATION;
  }

  if (GTSEncoder.FLAGS_ELEVATION == (locElevFlag & GTSEncoder.FLAGS_ELEVATION)) {
    if (GTSEncoder.FLAGS_ELEVATION_IDENTICAL != (locElevFlag & GTSEncoder.FLAGS_ELEVATION_IDENTICAL)) {
      boolean zigzag = GTSEncoder.FLAGS_ELEVATION_ZIGZAG == (locElevFlag & GTSEncoder.FLAGS_ELEVATION_ZIGZAG);

      long encoded;

      if (zigzag) {
        encoded = Varint.decodeSignedLong(buffer);
      } else {
        ByteOrder order = buffer.order();
        buffer.order(ByteOrder.BIG_ENDIAN);
        encoded = buffer.getLong();
        buffer.order(order);
      }

      if (GTSEncoder.FLAGS_ELEVATION_DELTA_PREVIOUS == (locElevFlag & GTSEncoder.FLAGS_ELEVATION_DELTA_PREVIOUS)) {
        previousLastElevation = lastElevation;
        lastElevation = lastElevation + encoded;
      } else {
        previousLastElevation = lastElevation;
        lastElevation = encoded;
      }
    }
  } else {
    previousLastElevation = lastElevation;
    lastElevation = GeoTimeSerie.NO_ELEVATION;
  }

  //
  // Extract value
  //

  switch (tsTypeFlag & GTSEncoder.FLAGS_MASK_TYPE) {
    case GTSEncoder.FLAGS_TYPE_LONG:
      lastType = TYPE.LONG;
      if (GTSEncoder.FLAGS_VALUE_IDENTICAL != (tsTypeFlag & GTSEncoder.FLAGS_VALUE_IDENTICAL)) {
        long encoded;

        if (GTSEncoder.FLAGS_LONG_ZIGZAG == (tsTypeFlag & GTSEncoder.FLAGS_LONG_ZIGZAG)) {
          encoded = Varint.decodeSignedLong(buffer);
        } else {
          ByteOrder order = buffer.order();
          buffer.order(ByteOrder.BIG_ENDIAN);
          encoded = buffer.getLong();
          buffer.order(order);
        }

        if (GTSEncoder.FLAGS_LONG_DELTA_PREVIOUS == (tsTypeFlag & GTSEncoder.FLAGS_LONG_DELTA_PREVIOUS)) {
          previousLastLongValue = lastLongValue;
          lastLongValue = lastLongValue + encoded;
        } else {
          previousLastLongValue = lastLongValue;
          lastLongValue = encoded;
        }
      }
      break;

    case GTSEncoder.FLAGS_TYPE_DOUBLE:
      lastType = TYPE.DOUBLE;
      if (GTSEncoder.FLAGS_VALUE_IDENTICAL != (tsTypeFlag & GTSEncoder.FLAGS_VALUE_IDENTICAL)) {
        if (GTSEncoder.FLAGS_DOUBLE_IEEE754 == (tsTypeFlag & GTSEncoder.FLAGS_DOUBLE_IEEE754)) {
          ByteOrder order = buffer.order();
          buffer.order(ByteOrder.BIG_ENDIAN);
          previousLastDoubleValue = lastDoubleValue;
          lastDoubleValue = buffer.getDouble();
          previousLastBDValue = lastBDValue;
          lastBDValue = null;
          buffer.order(order);
        } else {
          int scale = buffer.get();
          long unscaled = Varint.decodeSignedLong(buffer);
          previousLastBDValue = lastBDValue;
          lastBDValue = new BigDecimal(new BigInteger(Long.toString(unscaled)), scale);
        }
      }
      break;

    case GTSEncoder.FLAGS_TYPE_STRING:
      lastType = TYPE.STRING;
      if (GTSEncoder.FLAGS_VALUE_IDENTICAL != (tsTypeFlag & GTSEncoder.FLAGS_VALUE_IDENTICAL)) {
        // Decode String length
        long len = Varint.decodeUnsignedLong(buffer);

        // Prevent excessive allocation
        if (len > buffer.remaining()) {
          throw new RuntimeException("Invalid string length.");
        }

        byte[] utf8 = new byte[(int) len];
        // Read String UTF8 representation
        buffer.get(utf8);
        previousLastStringValue = lastStringValue;
        lastStringValue = new String(utf8, Charsets.UTF_8);
      }
      break;

    case GTSEncoder.FLAGS_TYPE_BOOLEAN:
      if (GTSEncoder.FLAGS_DELETE_MARKER == (tsTypeFlag & GTSEncoder.FLAGS_MASK_TYPE_FLAGS)) {
        lastType = TYPE.UNDEFINED;
      } else {
        lastType = TYPE.BOOLEAN;
        if (GTSEncoder.FLAGS_BOOLEAN_VALUE_TRUE == (tsTypeFlag & GTSEncoder.FLAGS_MASK_TYPE_FLAGS)) {
          lastBooleanValue = true;
        } else if (GTSEncoder.FLAGS_BOOLEAN_VALUE_FALSE == (tsTypeFlag & GTSEncoder.FLAGS_MASK_TYPE_FLAGS)) {
          lastBooleanValue = false;
        } else {
          throw new RuntimeException("Invalid boolean value.");
        }
        //lastBooleanValue = GTSEncoder.FLAGS_BOOLEAN_VALUE == (tsTypeFlag & GTSEncoder.FLAGS_BOOLEAN_VALUE);
      }
      break;

    default:
      throw new RuntimeException("Invalid type encountered!");
  }

  return true;
}
From source file:io.warp10.continuum.gts.GTSDecoder.java
License:Apache License
/**
 * Attempt to read the next measurement and associated metadata (timestamp, location, elevation)
 * @return true if a measurement was successfully read, false if none were left in the buffer.
 */
public boolean next() {
  //
  // Update position prior to reading the next value, etc so we can
  //

  this.position = this.buffer.position();

  if (!buffer.hasRemaining()) {
    return false;
  }

  this.nextCalled = true;

  //
  // Read timestamp/type flag
  //

  byte tsTypeFlag = buffer.get();

  //
  // Check if we encountered encrypted data
  //

  if (GTSEncoder.FLAGS_ENCRYPTED == (tsTypeFlag & GTSEncoder.FLAGS_MASK_ENCRYPTED)) {
    //
    // Extract encrypted length
    //

    int enclen = (int) Varint.decodeUnsignedLong(buffer);

    //
    // If there is no decryption key, simply skip the encrypted data
    // and call next recursively.
    //

    if (null == wrappingKey) {
      buffer.position(buffer.position() + enclen);

      // WARNING(hbs): if there are many encrypted chunks this may lead to a stack overflow
      return next();
    }

    byte[] encrypted = new byte[enclen];
    buffer.get(encrypted);

    //
    // Decrypt the encrypted data
    //

    AESWrapEngine engine = new AESWrapEngine();
    CipherParameters params = new KeyParameter(this.wrappingKey);
    engine.init(false, params);

    try {
      byte[] decrypted = engine.unwrap(encrypted, 0, encrypted.length);

      //
      // Unpad the decrypted data
      //

      PKCS7Padding padding = new PKCS7Padding();
      int padcount = padding.padCount(decrypted);

      //
      // Replace the current buffer with a new one containing the
      // decrypted data followed by any remaining data in the original
      // buffer.
      //

      ByteBuffer bb = ByteBuffer.allocate(decrypted.length - padcount + this.buffer.remaining());
      bb.put(decrypted, 0, decrypted.length - padcount);
      bb.put(this.buffer);
      bb.flip();

      this.buffer = bb;

      decodedEncrypted = true;
    } catch (InvalidCipherTextException icte) {
      // FIXME(hbs): log this somewhere...
      //
      // Skip the encrypted chunk we failed to decrypt
      //
    }

    //
    // Call next recursively
    //

    // WARNING(hbs): we may hit StackOverflow in some cases
    return next();
  }

  //
  // Read location/elevation flag if needed
  //

  byte locElevFlag = 0x0;

  if (GTSEncoder.FLAGS_CONTINUATION == (tsTypeFlag & GTSEncoder.FLAGS_CONTINUATION)) {
    if (!buffer.hasRemaining()) {
      return false;
    }

    locElevFlag = buffer.get();
  }

  //
  // Read timestamp
  //

  switch (tsTypeFlag & GTSEncoder.FLAGS_MASK_TIMESTAMP) {
    case GTSEncoder.FLAGS_TIMESTAMP_RAW_ABSOLUTE: {
      ByteOrder order = buffer.order();
      buffer.order(ByteOrder.BIG_ENDIAN);
      previousLastTimestamp = lastTimestamp;
      lastTimestamp = buffer.getLong();
      buffer.order(order);
    }
      break;
    //case GTSEncoder.FLAGS_TIMESTAMP_ZIGZAG_ABSOLUTE:
    //  previousLastTimestamp = lastTimestamp;
    //  lastTimestamp = Varint.decodeSignedLong(buffer);
    //  break;
    case GTSEncoder.FLAGS_TIMESTAMP_EQUALS_BASE:
      previousLastTimestamp = lastTimestamp;
      lastTimestamp = baseTimestamp;
      break;
    case GTSEncoder.FLAGS_TIMESTAMP_ZIGZAG_DELTA_BASE: {
      long delta = Varint.decodeSignedLong(buffer);
      previousLastTimestamp = lastTimestamp;
      lastTimestamp = baseTimestamp + delta;
    }
      break;
    case GTSEncoder.FLAGS_TIMESTAMP_ZIGZAG_DELTA_PREVIOUS: {
      long delta = Varint.decodeSignedLong(buffer);
      previousLastTimestamp = lastTimestamp;
      lastTimestamp = lastTimestamp + delta;
    }
      break;
    default:
      throw new RuntimeException("Invalid timestamp format.");
  }

  //
  // Read location/elevation
  //

  if (GTSEncoder.FLAGS_LOCATION == (locElevFlag & GTSEncoder.FLAGS_LOCATION)) {
    if (GTSEncoder.FLAGS_LOCATION_IDENTICAL != (locElevFlag & GTSEncoder.FLAGS_LOCATION_IDENTICAL)) {
      if (GTSEncoder.FLAGS_LOCATION_GEOXPPOINT_ZIGZAG_DELTA == (locElevFlag & GTSEncoder.FLAGS_LOCATION_GEOXPPOINT_ZIGZAG_DELTA)) {
        long delta = Varint.decodeSignedLong(buffer);
        previousLastGeoXPPoint = lastGeoXPPoint;
        lastGeoXPPoint = lastGeoXPPoint + delta;
      } else {
        ByteOrder order = buffer.order();
        buffer.order(ByteOrder.BIG_ENDIAN);
        previousLastGeoXPPoint = lastGeoXPPoint;
        lastGeoXPPoint = buffer.getLong();
        buffer.order(order);
      }
    }
  } else {
    previousLastGeoXPPoint = lastGeoXPPoint;
    lastGeoXPPoint = GeoTimeSerie.NO_LOCATION;
  }

  if (GTSEncoder.FLAGS_ELEVATION == (locElevFlag & GTSEncoder.FLAGS_ELEVATION)) {
    if (GTSEncoder.FLAGS_ELEVATION_IDENTICAL != (locElevFlag & GTSEncoder.FLAGS_ELEVATION_IDENTICAL)) {
      boolean zigzag = GTSEncoder.FLAGS_ELEVATION_ZIGZAG == (locElevFlag & GTSEncoder.FLAGS_ELEVATION_ZIGZAG);

      long encoded;

      if (zigzag) {
        encoded = Varint.decodeSignedLong(buffer);
      } else {
        ByteOrder order = buffer.order();
        buffer.order(ByteOrder.BIG_ENDIAN);
        encoded = buffer.getLong();
        buffer.order(order);
      }

      if (GTSEncoder.FLAGS_ELEVATION_DELTA_PREVIOUS == (locElevFlag & GTSEncoder.FLAGS_ELEVATION_DELTA_PREVIOUS)) {
        previousLastElevation = lastElevation;
        lastElevation = lastElevation + encoded;
      } else {
        previousLastElevation = lastElevation;
        lastElevation = encoded;
      }
    }
  } else {
    previousLastElevation = lastElevation;
    lastElevation = GeoTimeSerie.NO_ELEVATION;
  }

  //
  // Extract value
  //

  switch (tsTypeFlag & GTSEncoder.FLAGS_MASK_TYPE) {
    case GTSEncoder.FLAGS_TYPE_LONG:
      lastType = TYPE.LONG;
      if (GTSEncoder.FLAGS_VALUE_IDENTICAL != (tsTypeFlag & GTSEncoder.FLAGS_VALUE_IDENTICAL)) {
        long encoded;

        if (GTSEncoder.FLAGS_LONG_ZIGZAG == (tsTypeFlag & GTSEncoder.FLAGS_LONG_ZIGZAG)) {
          encoded = Varint.decodeSignedLong(buffer);
        } else {
          ByteOrder order = buffer.order();
          buffer.order(ByteOrder.BIG_ENDIAN);
          encoded = buffer.getLong();
          buffer.order(order);
        }

        if (GTSEncoder.FLAGS_LONG_DELTA_PREVIOUS == (tsTypeFlag & GTSEncoder.FLAGS_LONG_DELTA_PREVIOUS)) {
          previousLastLongValue = lastLongValue;
          lastLongValue = lastLongValue + encoded;
        } else {
          previousLastLongValue = lastLongValue;
          lastLongValue = encoded;
        }
      } else {
        previousLastLongValue = lastLongValue;
      }
      break;

    case GTSEncoder.FLAGS_TYPE_DOUBLE:
      lastType = TYPE.DOUBLE;
      if (GTSEncoder.FLAGS_VALUE_IDENTICAL != (tsTypeFlag & GTSEncoder.FLAGS_VALUE_IDENTICAL)) {
        if (GTSEncoder.FLAGS_DOUBLE_IEEE754 == (tsTypeFlag & GTSEncoder.FLAGS_DOUBLE_IEEE754)) {
          ByteOrder order = buffer.order();
          buffer.order(ByteOrder.BIG_ENDIAN);
          previousLastDoubleValue = lastDoubleValue;
          lastDoubleValue = buffer.getDouble();
          previousLastBDValue = lastBDValue;
          lastBDValue = null;
          buffer.order(order);
        } else {
          int scale = buffer.get();
          long unscaled = Varint.decodeSignedLong(buffer);
          previousLastBDValue = lastBDValue;
          lastBDValue = new BigDecimal(new BigInteger(Long.toString(unscaled)), scale);
        }
      } else {
        previousLastDoubleValue = lastDoubleValue;
        previousLastBDValue = lastBDValue;
      }
      break;

    case GTSEncoder.FLAGS_TYPE_STRING:
      lastType = TYPE.STRING;
      if (GTSEncoder.FLAGS_VALUE_IDENTICAL != (tsTypeFlag & GTSEncoder.FLAGS_VALUE_IDENTICAL)) {
        // Decode String length
        long len = Varint.decodeUnsignedLong(buffer);

        // Prevent excessive allocation
        if (len > buffer.remaining()) {
          throw new RuntimeException("Invalid string length.");
        }

        byte[] utf8 = new byte[(int) len];
        // Read String UTF8 representation
        buffer.get(utf8);
        previousLastStringValue = lastStringValue;
        lastStringValue = new String(utf8, Charsets.UTF_8);
      } else {
        previousLastStringValue = lastStringValue;
      }
      break;

    case GTSEncoder.FLAGS_TYPE_BOOLEAN:
      if (GTSEncoder.FLAGS_DELETE_MARKER == (tsTypeFlag & GTSEncoder.FLAGS_MASK_TYPE_FLAGS)) {
        lastType = TYPE.UNDEFINED;
      } else {
        lastType = TYPE.BOOLEAN;
        if (GTSEncoder.FLAGS_BOOLEAN_VALUE_TRUE == (tsTypeFlag & GTSEncoder.FLAGS_MASK_TYPE_FLAGS)) {
          lastBooleanValue = true;
        } else if (GTSEncoder.FLAGS_BOOLEAN_VALUE_FALSE == (tsTypeFlag & GTSEncoder.FLAGS_MASK_TYPE_FLAGS)) {
          lastBooleanValue = false;
        } else {
          throw new RuntimeException("Invalid boolean value.");
        }
        //lastBooleanValue = GTSEncoder.FLAGS_BOOLEAN_VALUE == (tsTypeFlag & GTSEncoder.FLAGS_BOOLEAN_VALUE);
      }
      break;

    default:
      throw new RuntimeException("Invalid type encountered!");
  }

  this.consumingNextCalls++;

  return true;
}
From source file:io.warp10.continuum.gts.GTSEncoderTest.java
License:Apache License
@Test
public void testAddValue_encrypted() throws Exception {
  long now = System.currentTimeMillis() * 1000L;

  byte[] key = new byte[32];

  GTSEncoder encoder = new GTSEncoder(now - 1000000L, key);

  encoder.addValue(now, GeoTimeSerie.NO_LOCATION, GeoTimeSerie.NO_ELEVATION, 1L);
  encoder.addValue(now + 1000000L, GeoTimeSerie.NO_LOCATION, GeoTimeSerie.NO_ELEVATION, 2L);

  byte[] encrypted = encoder.getBytes();

  Assert.assertEquals(GTSEncoder.FLAGS_ENCRYPTED, encrypted[0] & GTSEncoder.FLAGS_MASK_ENCRYPTED);
  Assert.assertEquals(26, encrypted.length);

  //
  // Now check that we can decrypt the payload.
  // We can't use an offset different than 0 in unwrap due to BJA-461,
  // so we have to copy the data prior to decrypting it.
  //

  AESWrapEngine engine = new AESWrapEngine();
  KeyParameter params = new KeyParameter(key);
  engine.init(false, params);

  byte[] enc = new byte[24];
  System.arraycopy(encrypted, 2, enc, 0, 24);
  byte[] decrypted = engine.unwrap(enc, 0, 24);

  //
  // Now decode the decrypted data
  //

  PKCS7Padding padding = new PKCS7Padding();
  GTSDecoder decoder = new GTSDecoder(now - 1000000L,
      ByteBuffer.wrap(decrypted, 0, decrypted.length - padding.padCount(decrypted)));

  decoder.next();

  Assert.assertEquals(now, decoder.getTimestamp());
  Assert.assertEquals(GeoTimeSerie.NO_LOCATION, decoder.getLocation());
  Assert.assertEquals(GeoTimeSerie.NO_ELEVATION, decoder.getElevation());
  Assert.assertEquals(1L, decoder.getValue());

  decoder.next();

  Assert.assertEquals(now + 1000000L, decoder.getTimestamp());
  Assert.assertEquals(GeoTimeSerie.NO_LOCATION, decoder.getLocation());
  Assert.assertEquals(GeoTimeSerie.NO_ELEVATION, decoder.getElevation());
  Assert.assertEquals(2L, decoder.getValue());
}
From source file:io.warp10.continuum.store.Directory.java
License:Apache License
public Directory(KeyStore keystore, final Properties props) throws IOException {
  this.keystore = keystore;

  SIPHASH_CLASS_LONGS = SipHashInline.getKey(this.keystore.getKey(KeyStore.SIPHASH_CLASS));
  SIPHASH_LABELS_LONGS = SipHashInline.getKey(this.keystore.getKey(KeyStore.SIPHASH_LABELS));

  this.properties = (Properties) props.clone();

  //
  // Check mandatory parameters
  //

  for (String required : REQUIRED_PROPERTIES) {
    Preconditions.checkNotNull(properties.getProperty(required), "Missing configuration parameter '%s'.", required);
  }

  maxThriftFrameLength = Integer.parseInt(
      this.properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_FRAME_MAXLEN, "0"));
  maxFindResults = Integer.parseInt(
      this.properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_FIND_MAXRESULTS, "100000"));
  maxHardFindResults = Integer.parseInt(
      this.properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_FIND_MAXRESULTS_HARD));

  this.register = "true".equals(this.properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_REGISTER));
  this.init = "true".equals(this.properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_INIT));
  this.store = "true".equals(this.properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STORE));
  this.delete = "true".equals(this.properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_DELETE));

  //
  // Extract parameters
  //

  if (null != props.getProperty(io.warp10.continuum.Configuration.DIRECTORY_METADATA_CACHE_SIZE)) {
    this.METADATA_CACHE_SIZE = Integer
        .valueOf(props.getProperty(io.warp10.continuum.Configuration.DIRECTORY_METADATA_CACHE_SIZE));
  }

  idleTimeout = Long.parseLong(
      this.properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STREAMING_IDLE_TIMEOUT));

  if (properties.containsKey(io.warp10.continuum.Configuration.DIRECTORY_STATS_CLASS_MAXCARDINALITY)) {
    this.LIMIT_CLASS_CARDINALITY = Long.parseLong(
        properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STATS_CLASS_MAXCARDINALITY));
  }

  if (properties.containsKey(io.warp10.continuum.Configuration.DIRECTORY_STATS_LABELS_MAXCARDINALITY)) {
    this.LIMIT_LABELS_CARDINALITY = Long.parseLong(
        properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STATS_LABELS_MAXCARDINALITY));
  }

  this.initNThreads = Integer.parseInt(properties.getProperty(
      io.warp10.continuum.Configuration.DIRECTORY_INIT_NTHREADS, DIRECTORY_INIT_NTHREADS_DEFAULT));

  String partition = properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_PARTITION);
  String[] tokens = partition.split(":");
  this.modulus = Integer.parseInt(tokens[0]);
  this.remainder = Integer.parseInt(tokens[1]);

  this.maxage = Long.parseLong(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_MAXAGE));

  final String topic = properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_TOPIC);
  final int nthreads = Integer
      .valueOf(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_KAFKA_NTHREADS));

  Configuration conf = new Configuration();
  conf.set("hbase.zookeeper.quorum",
      properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_HBASE_METADATA_ZKCONNECT));
  if (!"".equals(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_HBASE_METADATA_ZNODE))) {
    conf.set("zookeeper.znode.parent",
        properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_HBASE_METADATA_ZNODE));
  }
  if (properties.containsKey(io.warp10.continuum.Configuration.DIRECTORY_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT)) {
    conf.set("hbase.zookeeper.property.clientPort", properties
        .getProperty(io.warp10.continuum.Configuration.DIRECTORY_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT));
  }

  //
  // Handle additional HBase configurations
  //

  if (properties.containsKey(io.warp10.continuum.Configuration.DIRECTORY_HBASE_CONFIG)) {
    String[] keys = properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_HBASE_CONFIG).split(",");
    for (String key : keys) {
      if (!properties.containsKey("directory." + key.trim())) {
        throw new RuntimeException("Missing declared property 'directory." + key.trim() + "'.");
      }
      conf.set(key, properties.getProperty("directory." + key.trim()));
    }
  }

  this.conn = ConnectionFactory.createConnection(conf);
  this.hbaseTable = TableName
      .valueOf(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_HBASE_METADATA_TABLE));
  this.colfam = properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_HBASE_METADATA_COLFAM)
      .getBytes(Charsets.UTF_8);

  this.serviceNThreads = Integer
      .valueOf(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_SERVICE_NTHREADS));

  //
  // Extract keys
  //

  extractKeys(properties);

  SIPHASH_PSK_LONGS = SipHashInline.getKey(this.keystore.getKey(KeyStore.SIPHASH_DIRECTORY_PSK));

  //
  // Load Directory plugin
  //

  if (this.properties.containsKey(io.warp10.continuum.Configuration.DIRECTORY_PLUGIN_CLASS)) {
    try {
      // Create new classloader with filtering so caller cannot access the warp10 classes, except those needed
      ClassLoader filteringCL = new ClassLoader(this.getClass().getClassLoader()) {
        @Override
        protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
          if (name.startsWith("io.warp10") && !name.startsWith("io.warp10.warp.sdk.")) {
            throw new ClassNotFoundException();
          } else {
            return this.getParent().loadClass(name);
          }
        }
      };

      Class pluginClass = Class.forName(
          (String) properties.get(io.warp10.continuum.Configuration.DIRECTORY_PLUGIN_CLASS), true, filteringCL);
      this.plugin = (DirectoryPlugin) pluginClass.newInstance();

      //
      // Now call the 'init' method of the plugin
      //

      this.plugin.init(new Properties(properties));
    } catch (Exception e) {
      throw new RuntimeException("Unable to instantiate plugin class", e);
    }
  } else {
    this.plugin = null;
  }

  //
  // Create Curator framework and service discovery
  //

  CuratorFramework curatorFramework = CuratorFrameworkFactory.builder()
      .connectionTimeoutMs(1000)
      .retryPolicy(new RetryNTimes(10, 500))
      .connectString(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_ZK_QUORUM))
      .build();
  curatorFramework.start();

  this.sd = ServiceDiscoveryBuilder.builder(Map.class)
      .basePath(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_ZK_ZNODE))
      .client(curatorFramework)
      .build();

  //
  // Launch a Thread which will populate the metadata cache.
  // We don't do that in the constructor otherwise it might take too long to return.
  //

  final Directory self = this;

  if (this.init) {
    Thread[] initThreads = new Thread[this.initNThreads];
    final AtomicBoolean[] stopMarkers = new AtomicBoolean[this.initNThreads];
    final LinkedBlockingQueue<Result> resultQ = new LinkedBlockingQueue<Result>(initThreads.length * 8192);

    for (int i = 0; i < initThreads.length; i++) {
      stopMarkers[i] = new AtomicBoolean(false);
      final AtomicBoolean stopMe = stopMarkers[i];
      initThreads[i] = new Thread(new Runnable() {
        @Override
        public void run() {
          AESWrapEngine engine = null;

          if (null != self.keystore.getKey(KeyStore.AES_HBASE_METADATA)) {
            engine = new AESWrapEngine();
            CipherParameters params = new KeyParameter(self.keystore.getKey(KeyStore.AES_HBASE_METADATA));
            engine.init(false, params);
          }

          PKCS7Padding padding = new PKCS7Padding();

          TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());

          while (!stopMe.get()) {
            try {
              Result result = resultQ.poll(100, TimeUnit.MILLISECONDS);

              if (null == result) {
                continue;
              }

              byte[] value = result.getValue(self.colfam, Constants.EMPTY_COLQ);

              if (null != engine) {
                //
                // Unwrap
                //

                byte[] unwrapped = engine.unwrap(value, 0, value.length);

                //
                // Unpad
                //

                int padcount = padding.padCount(unwrapped);
                value = Arrays.copyOf(unwrapped, unwrapped.length - padcount);
              }

              //
              // Deserialize
              //

              Metadata metadata = new Metadata();
              deserializer.deserialize(metadata, value);

              //
              // Compute classId/labelsId and compare it to the values in the row key
              //

              long classId = GTSHelper.classId(self.SIPHASH_CLASS_LONGS, metadata.getName());
              long labelsId = GTSHelper.labelsId(self.SIPHASH_LABELS_LONGS, metadata.getLabels());

              //
              // Recheck labelsid so we don't retain GTS with invalid labelsid in the row key
              // (which may have happened due to bugs)
              //

              int rem = ((int) ((labelsId >>> 56) & 0xffL)) % self.modulus;

              if (self.remainder != rem) {
                continue;
              }

              ByteBuffer bb = ByteBuffer.wrap(result.getRow()).order(ByteOrder.BIG_ENDIAN);
              bb.position(1);
              long hbClassId = bb.getLong();
              long hbLabelsId = bb.getLong();

              // If classId/labelsId are incoherent, skip metadata
              if (classId != hbClassId || labelsId != hbLabelsId) {
                LOG.warn("Incoherent class/labels Id for " + metadata);
                continue;
              }

              metadata.setClassId(classId);
              metadata.setLabelsId(labelsId);

              if (!metadata.isSetAttributes()) {
                metadata.setAttributes(new HashMap<String, String>());
              }

              //
              // Internalize Strings
              //

              GTSHelper.internalizeStrings(metadata);

              //
              // Let the DirectoryPlugin handle the Metadata
              //

              if (null != plugin) {
                long nano = 0;
                try {
                  GTS gts = new GTS(new UUID(metadata.getClassId(), metadata.getLabelsId()),
                      metadata.getName(), metadata.getLabels(), metadata.getAttributes());
                  nano = System.nanoTime();
                  if (!plugin.store(null, gts)) {
                    throw new RuntimeException("Error storing GTS " + gts + " using external plugin.");
                  }
                } finally {
                  nano = System.nanoTime() - nano;
                  Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_DIRECTORY_PLUGIN_STORE_CALLS,
                      Sensision.EMPTY_LABELS, 1);
                  Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_DIRECTORY_PLUGIN_STORE_TIME_NANOS,
                      Sensision.EMPTY_LABELS, nano);
                }
                continue;
              }

              try {
                metadatasLock.lockInterruptibly();
                if (!metadatas.containsKey(metadata.getName())) {
                  metadatas.put(metadata.getName(), new ConcurrentSkipListMap<Long, Metadata>(ID_COMPARATOR));
                  classNames.put(classId, metadata.getName());
                }
              } finally {
                if (metadatasLock.isHeldByCurrentThread()) {
                  metadatasLock.unlock();
                }
              }

              //
              // Store per owner class name. We use the name since it has been internalized,
              // therefore we only consume the HashNode and the HashSet overhead.
              //

              String owner = metadata.getLabels().get(Constants.OWNER_LABEL);

              synchronized (classesPerOwner) {
                Set<String> classes = classesPerOwner.get(owner);

                if (null == classes) {
                  classes = new ConcurrentSkipListSet<String>();
                  classesPerOwner.put(owner, classes);
                }

                classes.add(metadata.getName());
              }

              Sensision.set(SensisionConstants.SENSISION_CLASS_CONTINUUM_DIRECTORY_OWNERS,
                  Sensision.EMPTY_LABELS, classesPerOwner.size());

              synchronized (metadatas.get(metadata.getName())) {
                if (!metadatas.get(metadata.getName()).containsKey(labelsId)) {
                  metadatas.get(metadata.getName()).put(labelsId, metadata);
                  continue;
                } else if (!metadatas.get(metadata.getName()).get(labelsId).getLabels()
                    .equals(metadata.getLabels())) {
                  LOG.warn("LabelsId collision under class '" + metadata.getName() + "' "
                      + metadata.getLabels() + " and "
                      + metadatas.get(metadata.getName()).get(labelsId).getLabels());
                  Sensision.update(SensisionConstants.CLASS_WARP_DIRECTORY_LABELS_COLLISIONS,
                      Sensision.EMPTY_LABELS, 1);
                }
              }

              continue;
            } catch (InvalidCipherTextException icte) {
              throw new RuntimeException(icte);
            } catch (TException te) {
              throw new RuntimeException(te);
            } catch (InterruptedException ie) {
            }
          }
        }
      });

      initThreads[i].setDaemon(true);
      initThreads[i].setName("[Directory initializer #" + i + "]");
      initThreads[i].start();
    }

    Thread populator = new Thread(new Runnable() {
      @Override
      public void run() {
        long nano = System.nanoTime();

        Table htable = null;

        long count = 0L;

        boolean done = false;

        byte[] lastrow = HBASE_METADATA_KEY_PREFIX;

        while (!done) {
          try {
            //
            // Populate the metadata cache with initial data from HBase
            //

            htable = self.conn.getTable(self.hbaseTable);

            Scan scan = new Scan();
            scan.setStartRow(lastrow);
            // FIXME(hbs): we know the prefix is 'M', so we use 'N' as the stoprow
            scan.setStopRow("N".getBytes(Charsets.UTF_8));
            scan.addFamily(self.colfam);
            scan.setCaching(10000);
            scan.setBatch(10000);
            scan.setMaxResultSize(1000000L);

            ResultScanner scanner = htable.getScanner(scan);

            do {
              Result result = scanner.next();

              if (null == result) {
                done = true;
                break;
              }

              //
              // FIXME(hbs): this could be done in a filter on the RS side
              //

              int r = (((int) result.getRow()[HBASE_METADATA_KEY_PREFIX.length + 8]) & 0xff) % self.modulus;
              //byte r = (byte) (result.getRow()[HBASE_METADATA_KEY_PREFIX.length + 8] % self.modulus);

              // Skip metadata if its modulus is not the one we expect
              if (self.remainder != r) {
                continue;
              }

              //
              // Store the current row so we can restart from there if an exception occurs
              //

              lastrow = result.getRow();

              boolean interrupted = true;

              while (interrupted) {
                interrupted = false;
                try {
                  resultQ.put(result);
                  count++;
                  if (0 == count % 1000) {
                    Sensision.set(SensisionConstants.SENSISION_CLASS_CONTINUUM_DIRECTORY_GTS,
                        Sensision.EMPTY_LABELS, count);
                  }
                } catch (InterruptedException ie) {
                  interrupted = true;
                }
              }
            } while (true);
          } catch (Exception e) {
            LOG.error("Caught exception in scanning loop, will attempt to continue where we stopped", e);
          } finally {
            if (null != htable) {
              try {
                htable.close();
              } catch (Exception e) {
              }
            }
            Sensision.set(SensisionConstants.SENSISION_CLASS_CONTINUUM_DIRECTORY_GTS,
                Sensision.EMPTY_LABELS, count);
          }
        }

        //
        // Wait until resultQ is empty
        //

        while (!resultQ.isEmpty()) {
          LockSupport.parkNanos(100000000L);
        }

        //
        // Notify the init threads to stop
        //

        for (int i = 0; i < initNThreads; i++) {
          stopMarkers[i].set(true);
        }

        self.cachePopulated.set(true);

        nano = System.nanoTime() - nano;

        LOG.info("Loaded " + count + " GTS in " + (nano / 1000000.0D) + " ms");
      }
    });

    populator.setName("Warp Directory Populator");
    populator.setDaemon(true);
    populator.start();
  } else {
    LOG.info("Skipped initialization");
    this.cachePopulated.set(true);
  }

  this.commitPeriod = Long.valueOf(
      properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_COMMITPERIOD));

  this.maxPendingPutsSize = Long.parseLong(
      properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_HBASE_METADATA_MAXPENDINGPUTSSIZE));

  this.host = properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_HOST);
  this.port = Integer.parseInt(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_PORT));
  this.streamingport = Integer
      .parseInt(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STREAMING_PORT));
  this.streamingacceptors = Integer
      .parseInt(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STREAMING_ACCEPTORS));
  this.streamingselectors = Integer
      .parseInt(properties.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STREAMING_SELECTORS));

  int streamingMaxThreads = Integer
      .parseInt(props.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STREAMING_THREADPOOL));

  final String groupid = properties
      .getProperty(io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_GROUPID);

  final KafkaOffsetCounters counters = new KafkaOffsetCounters(topic, groupid, this.commitPeriod * 2);

  Thread t = new Thread(new Runnable() {
    @Override
    public void run() {
      //
      // Wait until directory is fully initialized
      //

      while (!self.fullyInitialized.get()) {
        LockSupport.parkNanos(1000000000L);
      }

      Sensision.set(SensisionConstants.SENSISION_CLASS_CONTINUUM_DIRECTORY_CLASSES,
          Sensision.EMPTY_LABELS, classNames.size());

      //
      // Enter an endless loop which will spawn 'nthreads' threads
      // each time the Kafka consumer is shut down (which will happen if an error
      // happens while talking to HBase, to get a chance to re-read data from the
      // previous snapshot).
      //

      while (true) {
        try {
          Map<String, Integer> topicCountMap = new HashMap<String, Integer>();

          topicCountMap.put(topic, nthreads);

          Properties props = new Properties();
          props.setProperty("zookeeper.connect", properties
              .getProperty(io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_ZKCONNECT));
          props.setProperty("group.id", groupid);
          if (null != properties.getProperty(
              io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_CONSUMER_CLIENTID)) {
            props.setProperty("client.id", properties.getProperty(
                io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_CONSUMER_CLIENTID));
          }
          if (null != properties.getProperty(
              io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_CONSUMER_PARTITION_ASSIGNMENT_STRATEGY)) {
            props.setProperty("partition.assignment.strategy", properties.getProperty(
                io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_CONSUMER_PARTITION_ASSIGNMENT_STRATEGY));
          }
          props.setProperty("auto.commit.enable", "false");
          if (null != properties.getProperty(
              io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_CONSUMER_AUTO_OFFSET_RESET)) {
            props.setProperty("auto.offset.reset", properties.getProperty(
                io.warp10.continuum.Configuration.DIRECTORY_KAFKA_METADATA_CONSUMER_AUTO_OFFSET_RESET));
          }

          ConsumerConfig config = new ConsumerConfig(props);
          ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);

          Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = connector
              .createMessageStreams(topicCountMap);

          List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

          self.barrier = new CyclicBarrier(streams.size() + 1);

          ExecutorService executor = Executors.newFixedThreadPool(nthreads);

          //
          // now create runnables which will consume messages
          //

          // Reset counters
          counters.reset();

          for (final KafkaStream<byte[], byte[]> stream : streams) {
            executor.submit(new DirectoryConsumer(self, stream, counters));
          }

          while (!abort.get() && !Thread.currentThread().isInterrupted()) {
            try {
              if (streams.size() == barrier.getNumberWaiting()) {
                //
                // Check if we should abort, which could happen when
                // an exception was thrown when flushing the commits just before
                // entering the barrier
                //

                if (abort.get()) {
                  break;
                }

                //
                // All processing threads are waiting on the barrier, this means we can flush the offsets because
                // they have all processed data successfully for the given activity period
                //

                // Commit offsets
                connector.commitOffsets(true);
                counters.commit();

                counters.sensisionPublish();

                Sensision.update(SensisionConstants.SENSISION_CLASS_CONTINUUM_DIRECTORY_KAFKA_COMMITS,
                    Sensision.EMPTY_LABELS, 1);

                // Release the waiting threads
                try {
                  barrier.await();
                } catch (Exception e) {
                  break;
                }
              }
            } catch (Throwable t) {
              // We need to catch possible errors in commitOffsets
              LOG.error("", t);
              abort.set(true);
            }

            LockSupport.parkNanos(1000000L);
          }

          //
          // We exited the loop, this means one of the threads triggered an abort,
          // we will shut down the executor and shut down the connector to start over.
          //

          executor.shutdownNow();
          connector.shutdown();
          abort.set(false);
        } catch (Throwable t) {
          LOG.error("", t);
        } finally {
          LockSupport.parkNanos(1000000000L);
        }
      }
    }
  });

  t.setName("Warp Directory Spawner");
  t.setDaemon(true);
  t.start();

  t = new Thread(this);
  t.setName("Warp Directory");
  t.setDaemon(true);
  t.start();

  //
  // Start Jetty server for the streaming service
  //

  BlockingArrayQueue<Runnable> queue = null;

  if (props.containsKey(io.warp10.continuum.Configuration.DIRECTORY_STREAMING_MAXQUEUESIZE)) {
    int queuesize = Integer.parseInt(
        props.getProperty(io.warp10.continuum.Configuration.DIRECTORY_STREAMING_MAXQUEUESIZE));
    queue = new BlockingArrayQueue<Runnable>(queuesize);
  }

  Server server = new Server(new QueuedThreadPool(streamingMaxThreads, 8, (int) idleTimeout, queue));

  //ServerConnector connector = new ServerConnector(server, this.streamingacceptors, this.streamingselectors);
  HttpConfiguration config = new HttpConfiguration();
  config.setRequestHeaderSize(DIRECTORY_REQUEST_HEADER_SIZE);
  HttpConnectionFactory factory = new HttpConnectionFactory(config);
  ServerConnector connector = new ServerConnector(server, null, null, null,
      this.streamingacceptors, this.streamingselectors, factory);

  connector.setIdleTimeout(idleTimeout);
  connector.setPort(this.streamingport);
  connector.setHost(host);
  connector.setName("Directory Streaming Service");

  server.setConnectors(new Connector[] { connector });
  server.setHandler(this);

  JettyUtil.setSendServerVersion(server, false);

  //
  // Wait for initialization to be done
  //

  while (!this.fullyInitialized.get()) {
    LockSupport.parkNanos(1000000000L);
  }

  try {
    server.start();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
From source file:io.warp10.crypto.CryptoUtils.java
License:Apache License
public static byte[] unwrap(byte[] key, byte[] data) {
  //
  // Decrypt the encrypted data
  //

  AESWrapEngine engine = new AESWrapEngine();
  CipherParameters params = new KeyParameter(key);
  engine.init(false, params);

  try {
    byte[] decrypted = engine.unwrap(data, 0, data.length);

    //
    // Unpad the decrypted data
    //

    PKCS7Padding padding = new PKCS7Padding();
    int padcount = padding.padCount(decrypted);

    //
    // Remove padding
    //

    decrypted = Arrays.copyOfRange(decrypted, 0, decrypted.length - padcount);

    return decrypted;
  } catch (InvalidCipherTextException icte) {
    return null;
  }
}
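For context, a sketch of the forward direction this method reverses (no wrap helper is shown on this page, so this is a reconstruction under stated assumptions): pad the plaintext to the 8-byte boundary AES key wrap operates on, then wrap it. It assumes the padded data spans at least the two 8-byte blocks RFC 3394 requires.

public static byte[] wrap(byte[] key, byte[] data) {
  // PKCS#7 over the 8-byte key-wrap block size: always adds 1..8 bytes,
  // which is exactly what padCount() recovers on the unwrap side.
  byte[] padded = Arrays.copyOf(data, data.length + (8 - data.length % 8));
  new PKCS7Padding().addPadding(padded, data.length);

  AESWrapEngine engine = new AESWrapEngine();
  engine.init(true, new KeyParameter(key));
  return engine.wrap(padded, 0, padded.length);
}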
From source file:io.warp10.standalone.StandaloneDirectoryClient.java
License:Apache License
public StandaloneDirectoryClient(DB db, final KeyStore keystore) {
  this.initNThreads = Integer.parseInt(WarpConfig.getProperties()
      .getProperty(Configuration.DIRECTORY_INIT_NTHREADS, DIRECTORY_INIT_NTHREADS_DEFAULT));

  this.db = db;
  this.keystore = keystore;

  this.aesKey = this.keystore.getKey(KeyStore.AES_LEVELDB_METADATA);
  this.classKey = this.keystore.getKey(KeyStore.SIPHASH_CLASS);
  this.classLongs = SipHashInline.getKey(this.classKey);

  this.labelsKey = this.keystore.getKey(KeyStore.SIPHASH_LABELS);
  this.labelsLongs = SipHashInline.getKey(this.labelsKey);

  //
  // Read metadata from DB
  //

  if (null == db) {
    return;
  }

  DBIterator iter = db.iterator();

  iter.seek(METADATA_PREFIX);

  byte[] stop = "N".getBytes(Charsets.US_ASCII);

  long count = 0;

  Thread[] initThreads = new Thread[this.initNThreads];
  final AtomicBoolean[] stopMarkers = new AtomicBoolean[this.initNThreads];
  final LinkedBlockingQueue<Entry<byte[], byte[]>> resultQ = new LinkedBlockingQueue<Entry<byte[], byte[]>>(
      initThreads.length * 8192);

  for (int i = 0; i < initThreads.length; i++) {
    stopMarkers[i] = new AtomicBoolean(false);
    final AtomicBoolean stopMe = stopMarkers[i];
    initThreads[i] = new Thread(new Runnable() {
      @Override
      public void run() {
        byte[] bytes = new byte[16];

        AESWrapEngine engine = null;
        PKCS7Padding padding = null;

        if (null != keystore.getKey(KeyStore.AES_LEVELDB_METADATA)) {
          engine = new AESWrapEngine();
          CipherParameters params = new KeyParameter(keystore.getKey(KeyStore.AES_LEVELDB_METADATA));
          engine.init(false, params);
          padding = new PKCS7Padding();
        }

        TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());

        while (!stopMe.get()) {
          try {
            Entry<byte[], byte[]> result = resultQ.poll(100, TimeUnit.MILLISECONDS);

            if (null == result) {
              continue;
            }

            byte[] key = result.getKey();
            byte[] value = result.getValue();

            //
            // Unwrap
            //

            byte[] unwrapped = null != engine ? engine.unwrap(value, 0, value.length) : value;

            //
            // Unpad
            //

            int padcount = null != padding ? padding.padCount(unwrapped) : 0;
            byte[] unpadded = null != padding
                ? Arrays.copyOf(unwrapped, unwrapped.length - padcount)
                : unwrapped;

            //
            // Deserialize
            //

            Metadata metadata = new Metadata();
            deserializer.deserialize(metadata, unpadded);

            //
            // Compute classId/labelsId and compare it to the values in the row key
            //

            // 128BITS
            long classId = GTSHelper.classId(classLongs, metadata.getName());
            long labelsId = GTSHelper.labelsId(labelsLongs, metadata.getLabels());

            ByteBuffer bb = ByteBuffer.wrap(key).order(ByteOrder.BIG_ENDIAN);
            bb.position(1);
            long hbClassId = bb.getLong();
            long hbLabelsId = bb.getLong();

            // If classId/labelsId are incoherent, skip metadata
            if (classId != hbClassId || labelsId != hbLabelsId) {
              // FIXME(hbs): LOG
              System.err.println("Incoherent class/labels Id for " + metadata);
              continue;
            }

            // 128BITS
            metadata.setClassId(classId);
            metadata.setLabelsId(labelsId);

            if (!metadata.isSetAttributes()) {
              metadata.setAttributes(new HashMap<String, String>());
            }

            //
            // Internalize Strings
            //

            GTSHelper.internalizeStrings(metadata);

            synchronized (metadatas) {
              if (!metadatas.containsKey(metadata.getName())) {
                metadatas.put(metadata.getName(), (Map) new MapMaker().concurrencyLevel(64).makeMap());
              }
            }

            synchronized (metadatas.get(metadata.getName())) {
              if (!metadatas.get(metadata.getName()).containsKey(labelsId)) {
                metadatas.get(metadata.getName()).put(labelsId, metadata);

                //
                // Store Metadata under 'id'
                //

                // 128BITS
                GTSHelper.fillGTSIds(bytes, 0, classId, labelsId);
                BigInteger id = new BigInteger(bytes);
                metadatasById.put(id, metadata);

                continue;
              }
            }

            // FIXME(hbs): LOG
            System.err.println("Duplicate labelsId for classId " + classId + ": " + metadata);
            continue;
          } catch (InvalidCipherTextException icte) {
            throw new RuntimeException(icte);
          } catch (TException te) {
            throw new RuntimeException(te);
          } catch (InterruptedException ie) {
          }
        }
      }
    });

    initThreads[i].setDaemon(true);
    initThreads[i].setName("[Directory initializer #" + i + "]");
    initThreads[i].start();
  }

  try {
    long nano = System.nanoTime();

    while (iter.hasNext()) {
      Entry<byte[], byte[]> kv = iter.next();
      byte[] key = kv.getKey();

      if (Bytes.compareTo(key, stop) >= 0) {
        break;
      }

      boolean interrupted = true;

      while (interrupted) {
        interrupted = false;
        try {
          resultQ.put(kv);
          count++;
          if (0 == count % 1000) {
            Sensision.set(SensisionConstants.SENSISION_CLASS_CONTINUUM_DIRECTORY_GTS,
                Sensision.EMPTY_LABELS, count);
          }
        } catch (InterruptedException ie) {
          interrupted = true;
        }
      }
    }

    //
    // Wait until resultQ is empty
    //

    while (!resultQ.isEmpty()) {
      try {
        Thread.sleep(100L);
      } catch (InterruptedException ie) {
      }
    }

    //
    // Notify the init threads to stop
    //

    for (int i = 0; i < initNThreads; i++) {
      stopMarkers[i].set(true);
    }

    nano = System.nanoTime() - nano;

    System.out.println("Loaded " + count + " GTS in " + (nano / 1000000.0D) + " ms");
  } finally {
    Sensision.set(SensisionConstants.SENSISION_CLASS_CONTINUUM_DIRECTORY_GTS, Sensision.EMPTY_LABELS, count);

    try {
      iter.close();
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
}
From source file:net.sourceforge.keepassj2me.importerv3.ImporterV3.java
License:Open Source License
/**
 * Load a v3 database file, return contents in a new PwManager.
 *
 * @param inStream Stream to load the existing database from.
 * @param password Pass phrase for the database.
 * @return new PwManager container.
 *
 * @throws IOException on any file error.
 * @throws InvalidCipherTextException on a decryption error, or possible internal bug.
 * @throws Exception if the database did not decrypt correctly (wrong password or corrupted file).
 */
public PwManager openDatabase(InputStream inStream, String password)
    throws IOException, InvalidCipherTextException, Exception {
  PwManager newManager;
  SHA256Digest md;
  /** Master key encrypted several times */
  byte[] transformedMasterKey;
  byte[] finalKey;

  setProgress(5, "Open database");
  // #ifdef DEBUG
  System.out.println("Open database");
  // #endif

  // Load entire file; most of it is encrypted.
  byte[] filebuf = new byte[(int) inStream.available()];
  inStream.read(filebuf, 0, (int) inStream.available());
  inStream.close();

  // Parse header (unencrypted)
  if (filebuf.length < PwDbHeader.BUF_SIZE)
    throw new IOException("File too short for header");
  PwDbHeader hdr = new PwDbHeader(filebuf, 0);

  if ((hdr.signature1 != PwManager.PWM_DBSIG_1) || (hdr.signature2 != PwManager.PWM_DBSIG_2)) {
    // #ifdef DEBUG
    System.out.println("Bad database file signature");
    // #endif
    throw new IOException("Bad database file signature");
  }

  if (hdr.version != PwManager.PWM_DBVER_DW) {
    // #ifdef DEBUG
    System.out.println("Bad database file version");
    // #endif
    throw new IOException("Bad database file version");
  }

  newManager = new PwManager();
  newManager.setMasterKey(password);

  // Select algorithm
  if ((hdr.flags & PwManager.PWM_FLAG_RIJNDAEL) != 0) {
    // #ifdef DEBUG
    System.out.println("Algorithm AES");
    // #endif
    newManager.algorithm = PwManager.ALGO_AES;
  } else if ((hdr.flags & PwManager.PWM_FLAG_TWOFISH) != 0) {
    // #ifdef DEBUG
    System.out.println("Algorithm TWOFISH");
    // #endif
    newManager.algorithm = PwManager.ALGO_TWOFISH;
  } else {
    throw new IOException("Unknown algorithm.");
  }

  if (newManager.algorithm == PwManager.ALGO_TWOFISH)
    throw new IOException("TwoFish algorithm is not supported");

  newManager.numKeyEncRounds = hdr.numKeyEncRounds;
  // #ifdef DEBUG
  System.out.println("rounds = " + newManager.numKeyEncRounds);
  // #endif

  // testRijndael_JCE();

  newManager.name = "KeePass Password Manager";

  // Generate transformedMasterKey from masterKey
  //KeePassMIDlet.logS("masterSeed2: " + new String(Hex.encode(hdr.masterSeed2)));
  setProgress(10, "Decrypt key");
  transformedMasterKey = transformMasterKey(hdr.masterSeed2, newManager.masterKey,
      newManager.numKeyEncRounds);

  // Hash the master password with the salt in the file
  md = new SHA256Digest();
  md.update(hdr.masterSeed, 0, hdr.masterSeed.length);
  md.update(transformedMasterKey, 0, transformedMasterKey.length);
  finalKey = new byte[md.getDigestSize()];
  md.doFinal(finalKey, 0);

  setProgress(90, "Decrypt database");

  // NI
  //KeePassMIDlet.logS("finalKey: " + new String(Hex.encode(finalKey)));

  // Initialize Rijndael algorithm
  // Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5Padding");
  //PaddedBufferedBlockCipher cipher = new PaddedBufferedBlockCipher(new CBCBlockCipher(new AESEngine()));
  BufferedBlockCipher cipher = new BufferedBlockCipher(new CBCBlockCipher(new AESEngine()));

  //cipher.init(Cipher.DECRYPT_MODE, new SecretKeySpec(finalKey, "AES"), new IvParameterSpec(hdr.encryptionIV));
  cipher.init(false, new ParametersWithIV(new KeyParameter(finalKey), hdr.encryptionIV));

  // Decrypt! The first bytes aren't encrypted (that's the header)
  //int encryptedPartSize = cipher.doFinal(filebuf, PwDbHeader.BUF_SIZE, filebuf.length - PwDbHeader.BUF_SIZE, filebuf, PwDbHeader.BUF_SIZE);
  int paddedEncryptedPartSize = cipher.processBytes(filebuf, PwDbHeader.BUF_SIZE,
      filebuf.length - PwDbHeader.BUF_SIZE, filebuf, PwDbHeader.BUF_SIZE);

  int encryptedPartSize = 0;
  //try {
  PKCS7Padding padding = new PKCS7Padding();
  encryptedPartSize = paddedEncryptedPartSize - padding.padCount(filebuf);
  //} catch (Exception e) {
  //}

  // NI
  byte[] plainContent = new byte[encryptedPartSize];
  System.arraycopy(filebuf, PwDbHeader.BUF_SIZE, plainContent, 0, encryptedPartSize);
  // #ifdef DEBUG
  System.out.println("filebuf length: " + filebuf.length);
  // #endif
  //System.out.println("file length: " + filebuf.length);
  //System.out.println("plaintext contents length: " + encryptedPartSize);
  //System.out.println("plaintext contents:\n" + new String(Hex.encode(plainContent)));

  //if (pRepair == null) {
  //md = MessageDigest.getInstance("SHA-256");
  md = new SHA256Digest();
  md.update(filebuf, PwDbHeader.BUF_SIZE, encryptedPartSize);
  // md.update(makePad(filebuf));
  md.doFinal(finalKey, 0);

  if (Util.compare(finalKey, hdr.contentsHash) == false) {
    //KeePassMIDlet.logS("Database file did not decrypt correctly. (checksum code is broken)");
    // #ifdef DEBUG
    System.out.println("Database file did not decrypt correctly. (checksum code is broken)");
    // #endif
    throw new Exception(
        "Wrong Password, or Database File Corrupted (database file did not decrypt correctly)");
  }
  // }

  setProgress(95, "Import groups");
  // Import all groups
  // #ifdef DEBUG
  System.out.println("Import all groups");
  // #endif

  int pos = PwDbHeader.BUF_SIZE;
  PwGroup newGrp = new PwGroup();
  for (int i = 0; i < hdr.numGroups;) {
    int fieldType = Types.readShort(filebuf, pos);
    pos += 2;
    int fieldSize = Types.readInt(filebuf, pos);
    pos += 4;

    if (fieldType == 0xFFFF) {
      // #ifdef DEBUG
      System.out.println(newGrp.level + " " + newGrp.name);
      // #endif

      // End-Group record. Save group and count it.
      //newManager.groups.add(newGrp);
      newManager.addGroup(newGrp);
      newGrp = new PwGroup();
      i++;
    } else {
      readGroupField(newGrp, fieldType, filebuf, pos);
    }
    pos += fieldSize;
  }

  // fixGroups(groups);

  setProgress(97, "Import entries");
  // Import all entries
  // #ifdef DEBUG
  System.out.println("Import all entries");
  // #endif

  PwEntry newEnt = new PwEntry();
  for (int i = 0; i < hdr.numEntries;) {
    int fieldType = Types.readShort(filebuf, pos);
    int fieldSize = Types.readInt(filebuf, pos + 2);

    if (fieldType == 0xFFFF) {
      // End-Entry record. Save entry and count it.
      newManager.addEntry(newEnt);
      // #ifdef DEBUG
      System.out.println(newEnt.title);
      // #endif
      newEnt = new PwEntry();
      i++;
    } else {
      readEntryField(newEnt, filebuf, pos);
    }
    pos += 2 + 4 + fieldSize;
  }

  // Keep the Meta-Info entry separate
  // #ifdef DEBUG
  System.out.println("Keep the Meta-Info entry separate");
  // #endif
  for (int i = 0; i < newManager.entries.size(); i++) {
    PwEntry ent = (PwEntry) newManager.entries.elementAt(i);
    if (ent.title.equals("Meta-Info") && ent.url.equals("$") && ent.username.equals("SYSTEM")) {
      newManager.metaInfo = ent;
      newManager.entries.removeElementAt(i);
    }
  }

  setProgress(100, "Done");
  // #ifdef DEBUG
  System.out.println("Return newManager: " + newManager);
  // #endif
  return newManager;
}
From source file:net.sourceforge.keepassj2me.keydb.KeydbDatabase.java
License:Open Source License
private void decrypt(byte[] encoded, int offset, int length) throws KeydbException {
  BufferedBlockCipher cipher = new BufferedBlockCipher(new CBCBlockCipher(new AESEngine()));
  cipher.init(false, new ParametersWithIV(new KeyParameter(this.key), this.header.encryptionIV));

  // Decrypt! The first bytes aren't encrypted (that's the header)
  this.plainContent = new byte[encoded.length - KeydbHeader.SIZE];
  int paddedEncryptedPartSize = cipher.processBytes(encoded, offset, length, this.plainContent, 0);

  // Detect padding and compute the content size
  this.contentSize = 0;
  PKCS7Padding padding = new PKCS7Padding();
  try {
    this.contentSize = paddedEncryptedPartSize - padding.padCount(this.plainContent);
  } catch (InvalidCipherTextException e) {
    throw new KeydbException(Config.getLocaleString(keys.KD_DB_DECRYPT_ERR, new String[] { "1" }));
  }

  if (!KeydbUtil.compare(KeydbUtil.hash(this.plainContent, 0, this.contentSize), this.header.contentsHash)) {
    throw new KeydbException(Config.getLocaleString(keys.KD_DB_DECRYPT_ERR, new String[] { "2" }));
  }
}
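Both KeePass-derived examples decrypt with a bare BufferedBlockCipher and then call padCount() by hand (ImporterV3 above even carries a commented-out PaddedBufferedBlockCipher). For comparison, a sketch of that alternative, assuming key, iv and ciphertext variables like those in the surrounding code: PaddedBufferedBlockCipher verifies and strips the PKCS#7 padding itself, so no explicit padCount() call is needed.

PaddedBufferedBlockCipher cipher = new PaddedBufferedBlockCipher(
    new CBCBlockCipher(new AESEngine()), new PKCS7Padding());
cipher.init(false, new ParametersWithIV(new KeyParameter(key), iv));

byte[] out = new byte[cipher.getOutputSize(ciphertext.length)];
int n = cipher.processBytes(ciphertext, 0, ciphertext.length, out, 0);
// doFinal() validates the padding (throwing InvalidCipherTextException when
// it is malformed) and returns only the unpadded length.
n += cipher.doFinal(out, n);
byte[] plain = Arrays.copyOf(out, n);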