List of usage examples for `org.apache.lucene.util.SetOnce` — constructor `public SetOnce()`.
From source file:org.elasticsearch.common.xcontent.XContentParserUtilsTests.java
License:Apache License
public void testParseTypedKeysObject() throws IOException { final String delimiter = randomFrom("#", ":", "/", "-", "_", "|", "_delim_"); final XContentType xContentType = randomFrom(XContentType.values()); final ObjectParser<SetOnce<Boolean>, Void> BOOLPARSER = new ObjectParser<>("bool", () -> new SetOnce<>()); BOOLPARSER.declareBoolean(SetOnce::set, new ParseField("field")); final ObjectParser<SetOnce<Long>, Void> LONGPARSER = new ObjectParser<>("long", () -> new SetOnce<>()); LONGPARSER.declareLong(SetOnce::set, new ParseField("field")); List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>(); namedXContents.add(new NamedXContentRegistry.Entry(Boolean.class, new ParseField("bool"), p -> BOOLPARSER.parse(p, null).get())); namedXContents.add(new NamedXContentRegistry.Entry(Long.class, new ParseField("long"), p -> LONGPARSER.parse(p, null).get())); final NamedXContentRegistry namedXContentRegistry = new NamedXContentRegistry(namedXContents); BytesReference bytes = toXContent(// ww w .j a v a 2 s . 
com (builder, params) -> builder.startObject("name").field("field", 0).endObject(), xContentType, randomBoolean()); try (XContentParser parser = xContentType.xContent().createParser(namedXContentRegistry, bytes)) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); SetOnce<Boolean> booleanConsumer = new SetOnce<>(); parseTypedKeysObject(parser, delimiter, Boolean.class, booleanConsumer::set); // because of the missing type to identify the parser, we expect no return value, but also no exception assertNull(booleanConsumer.get()); ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.currentToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser::getTokenLocation); assertNull(parser.nextToken()); } bytes = toXContent((builder, params) -> builder.startObject("type" + delimiter + "name").field("bool", true) .endObject(), xContentType, randomBoolean()); try (XContentParser parser = xContentType.xContent().createParser(namedXContentRegistry, bytes)) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); NamedXContentRegistry.UnknownNamedObjectException e = expectThrows( NamedXContentRegistry.UnknownNamedObjectException.class, () -> parseTypedKeysObject(parser, delimiter, Boolean.class, a -> { })); assertEquals("Unknown Boolean [type]", e.getMessage()); assertEquals("type", e.getName()); assertEquals("java.lang.Boolean", e.getCategoryClass()); } final long longValue = randomLong(); final boolean boolValue = 
randomBoolean(); bytes = toXContent((builder, params) -> { builder.startObject("long" + delimiter + "l").field("field", longValue).endObject(); builder.startObject("bool" + delimiter + "l").field("field", boolValue).endObject(); return builder; }, xContentType, randomBoolean()); try (XContentParser parser = xContentType.xContent().createParser(namedXContentRegistry, bytes)) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); SetOnce<Long> parsedLong = new SetOnce<>(); parseTypedKeysObject(parser, delimiter, Long.class, parsedLong::set); assertNotNull(parsedLong); assertEquals(longValue, parsedLong.get().longValue()); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); SetOnce<Boolean> parsedBoolean = new SetOnce<>(); parseTypedKeysObject(parser, delimiter, Boolean.class, parsedBoolean::set); assertNotNull(parsedBoolean); assertEquals(boolValue, parsedBoolean.get()); ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser::getTokenLocation); } }
From source file:org.elasticsearch.ingest.geoip.DatabaseReaderLazyLoader.java
License:Apache License
/**
 * Creates a lazy loader for a GeoIP database file.
 *
 * @param databaseFileName name of the database file, kept for reporting/lookup
 * @param loader           supplier that actually opens the {@link DatabaseReader};
 *                         invoked lazily, may throw {@link IOException}
 */
DatabaseReaderLazyLoader(String databaseFileName, CheckedSupplier<DatabaseReader, IOException> loader) {
    // SetOnce guarantees the reader is materialized at most once.
    this.databaseReader = new SetOnce<>();
    this.loader = loader;
    this.databaseFileName = databaseFileName;
}
From source file:org.elasticsearch.ingest.PipelineExecutionServiceTests.java
License:Apache License
public void testExecuteIndexPipelineExistsButFailedParsing() { when(store.get("_id")) .thenReturn(new Pipeline("_id", "stub", null, new CompoundProcessor(new AbstractProcessor("mock") { @Override//from w w w. j a v a 2 s. c o m public void execute(IngestDocument ingestDocument) { throw new IllegalStateException("error"); } @Override public String getType() { return null; } }))); SetOnce<Boolean> failed = new SetOnce<>(); IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()) .setPipeline("_id"); Consumer<Exception> failureHandler = (e) -> { assertThat(e.getCause().getClass(), equalTo(IllegalArgumentException.class)); assertThat(e.getCause().getCause().getClass(), equalTo(IllegalStateException.class)); assertThat(e.getCause().getCause().getMessage(), equalTo("error")); failed.set(true); }; Consumer<Boolean> completionHandler = (e) -> failed.set(false); executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); assertTrue(failed.get()); }
From source file:org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase.java
License:Apache License
public void testIndicesDeletedFromRepository() throws Exception { Client client = client();/*from w ww . j a v a2s . c om*/ logger.info("--> creating repository"); final String repoName = "test-repo"; createAndCheckTestRepository(repoName); createIndex("test-idx-1", "test-idx-2", "test-idx-3"); ensureGreen(); logger.info("--> indexing some data"); for (int i = 0; i < 20; i++) { index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i); index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i); index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i); } refresh(); logger.info("--> take a snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster() .prepareCreateSnapshot(repoName, "test-snap").setWaitForCompletion(true).get(); assertEquals(createSnapshotResponse.getSnapshotInfo().successfulShards(), createSnapshotResponse.getSnapshotInfo().totalShards()); logger.info("--> indexing more data"); for (int i = 20; i < 40; i++) { index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i); index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i); index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i); } logger.info("--> take another snapshot with only 2 of the 3 indices"); createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(repoName, "test-snap2") .setWaitForCompletion(true).setIndices("test-idx-1", "test-idx-2").get(); assertEquals(createSnapshotResponse.getSnapshotInfo().successfulShards(), createSnapshotResponse.getSnapshotInfo().totalShards()); logger.info("--> delete a snapshot"); assertAcked(client().admin().cluster().prepareDeleteSnapshot(repoName, "test-snap").get()); logger.info("--> verify index folder deleted from blob container"); RepositoriesService repositoriesSvc = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName()); ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, internalCluster().getMasterName()); 
BlobStoreRepository repository = (BlobStoreRepository) repositoriesSvc.repository(repoName); final SetOnce<BlobContainer> indicesBlobContainer = new SetOnce<>(); final SetOnce<RepositoryData> repositoryData = new SetOnce<>(); final CountDownLatch latch = new CountDownLatch(1); threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { indicesBlobContainer.set(repository.blobStore().blobContainer(repository.basePath().add("indices"))); repositoryData.set(repository.getRepositoryData()); latch.countDown(); }); latch.await(); for (IndexId indexId : repositoryData.get().getIndices().values()) { if (indexId.getName().equals("test-idx-3")) { assertFalse(indicesBlobContainer.get().blobExists(indexId.getId())); // deleted index } } }
From source file:org.elasticsearch.repositories.s3.S3BlobContainer.java
License:Apache License
/** * Uploads a blob using multipart upload requests. */// w w w.ja va 2s .co m void executeMultipartUpload(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize) throws IOException { if (blobSize > MAX_FILE_SIZE_USING_MULTIPART.getBytes()) { throw new IllegalArgumentException("Multipart upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE_USING_MULTIPART); } if (blobSize < MIN_PART_SIZE_USING_MULTIPART.getBytes()) { throw new IllegalArgumentException("Multipart upload request size [" + blobSize + "] can't be smaller than " + MIN_PART_SIZE_USING_MULTIPART); } final long partSize = blobStore.bufferSizeInBytes(); final Tuple<Long, Long> multiparts = numberOfMultiparts(blobSize, partSize); if (multiparts.v1() > Integer.MAX_VALUE) { throw new IllegalArgumentException( "Too many multipart upload requests, maybe try a larger buffer size?"); } final int nbParts = multiparts.v1().intValue(); final long lastPartSize = multiparts.v2(); assert blobSize == (nbParts - 1) * partSize + lastPartSize : "blobSize does not match multipart sizes"; final SetOnce<String> uploadId = new SetOnce<>(); final String bucketName = blobStore.bucket(); boolean success = false; try { final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName); initRequest.setStorageClass(blobStore.getStorageClass()); initRequest.setCannedACL(blobStore.getCannedACL()); if (blobStore.serverSideEncryption()) { final ObjectMetadata md = new ObjectMetadata(); md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); initRequest.setObjectMetadata(md); } uploadId.set(blobStore.client().initiateMultipartUpload(initRequest).getUploadId()); if (Strings.isEmpty(uploadId.get())) { throw new IOException("Failed to initialize multipart upload " + blobName); } final List<PartETag> parts = new ArrayList<>(); long bytesCount = 0; for (int i = 1; i <= nbParts; i++) { final UploadPartRequest uploadRequest = 
new UploadPartRequest(); uploadRequest.setBucketName(bucketName); uploadRequest.setKey(blobName); uploadRequest.setUploadId(uploadId.get()); uploadRequest.setPartNumber(i); uploadRequest.setInputStream(input); if (i < nbParts) { uploadRequest.setPartSize(partSize); uploadRequest.setLastPart(false); } else { uploadRequest.setPartSize(lastPartSize); uploadRequest.setLastPart(true); } bytesCount += uploadRequest.getPartSize(); final UploadPartResult uploadResponse = blobStore.client().uploadPart(uploadRequest); parts.add(uploadResponse.getPartETag()); } if (bytesCount != blobSize) { throw new IOException("Failed to execute multipart upload for [" + blobName + "], expected " + blobSize + "bytes sent but got " + bytesCount); } CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(), parts); blobStore.client().completeMultipartUpload(complRequest); success = true; } catch (AmazonClientException e) { throw new IOException("Unable to upload object [" + blobName + "] using multipart upload", e); } finally { if (success == false && Strings.hasLength(uploadId.get())) { final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName, blobName, uploadId.get()); blobStore.client().abortMultipartUpload(abortRequest); } } }
From source file:org.elasticsearch.search.aggregations.Aggregations.java
License:Apache License
/**
 * Parses an {@link Aggregations} object from the parser's current object, dispatching each
 * nested object by its typed key ("type{@code TYPED_KEYS_DELIMITER}name").
 *
 * @throws ParsingException if a nested object's key cannot be resolved to an aggregation type
 */
public static Aggregations fromXContent(XContentParser parser) throws IOException {
    final List<Aggregation> results = new ArrayList<>();
    XContentParser.Token currentToken;
    while ((currentToken = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (currentToken != XContentParser.Token.START_OBJECT) {
            continue;
        }
        final SetOnce<Aggregation> parsedAgg = new SetOnce<>();
        final String fieldName = parser.currentName();
        parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Aggregation.class, parsedAgg::set);
        final Aggregation agg = parsedAgg.get();
        if (agg == null) {
            // No registered parser matched the typed key.
            throw new ParsingException(parser.getTokenLocation(),
                    String.format(Locale.ROOT, "Could not parse aggregation keyed as [%s]", fieldName));
        }
        results.add(agg);
    }
    return new Aggregations(results);
}
From source file:org.elasticsearch.test.InternalAggregationTestCase.java
License:Apache License
/**
 * Serializes {@code aggregation} with typed keys (optionally shuffled and with random fields
 * inserted), parses it back via the typed-keys dispatch, and asserts the parsed aggregation
 * matches the original in name, metadata, type, and round-tripped XContent.
 *
 * @return the parsed aggregation, cast to the caller's expected {@link ParsedAggregation} subtype
 */
@SuppressWarnings("unchecked")
protected <P extends ParsedAggregation> P parseAndAssert(final InternalAggregation aggregation,
        final boolean shuffled, final boolean addRandomFields) throws IOException {
    final ToXContent.Params params = new ToXContent.MapParams(
            singletonMap(RestSearchAction.TYPED_KEYS_PARAM, "true"));
    final XContentType xContentType = randomFrom(XContentType.values());
    final boolean humanReadable = randomBoolean();

    final BytesReference originalBytes = shuffled
            ? toShuffledXContent(aggregation, xContentType, params, humanReadable)
            : toXContent(aggregation, xContentType, params, humanReadable);

    final BytesReference mutated;
    if (addRandomFields) {
        /*
         * - we don't add to the root object because it should only contain
         * the named aggregation to test - we don't want to insert into the
         * "meta" object, because we pass on everything we find there
         *
         * - we don't want to directly insert anything random into "buckets"
         * objects, they are used with "keyed" aggregations and contain
         * named bucket objects. Any new named object on this level should
         * also be a bucket and be parsed as such.
         */
        final Predicate<String> basicExcludes = path -> path.isEmpty()
                || path.endsWith(Aggregation.CommonFields.META.getPreferredName())
                || path.endsWith(Aggregation.CommonFields.BUCKETS.getPreferredName());
        final Predicate<String> excludes = basicExcludes.or(excludePathsFromXContentInsertion());
        mutated = insertRandomFields(xContentType, originalBytes, excludes, random());
    } else {
        mutated = originalBytes;
    }

    final SetOnce<Aggregation> parsedAggregation = new SetOnce<>();
    try (XContentParser parser = createParser(xContentType.xContent(), mutated)) {
        assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
        assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
        assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
        XContentParserUtils.parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Aggregation.class,
                parsedAggregation::set);
        assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken());
        assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
        assertNull(parser.nextToken());

        final Aggregation agg = parsedAggregation.get();
        assertEquals(aggregation.getName(), agg.getName());
        assertEquals(aggregation.getMetaData(), agg.getMetaData());
        assertTrue(agg instanceof ParsedAggregation);
        assertEquals(aggregation.getType(), agg.getType());

        // Round-trip: re-serializing the parsed aggregation must be equivalent to the original bytes.
        final BytesReference parsedBytes = toXContent(agg, xContentType, params, humanReadable);
        assertToXContentEquivalent(originalBytes, parsedBytes, xContentType);
        return (P) agg;
    }
}
From source file:org.elasticsearch.xpack.security.action.filter.SecurityActionFilterTests.java
License:Open Source License
public void testApplyAsSystemUser() throws Exception { ActionRequest request = mock(ActionRequest.class); ActionListener listener = mock(ActionListener.class); User user = new User("username", "r1", "r2"); Authentication authentication = new Authentication(user, new RealmRef("test", "test", "foo"), null); SetOnce<Authentication> authenticationSetOnce = new SetOnce<>(); ActionFilterChain chain = (task, action, request1, listener1) -> { authenticationSetOnce.set(threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY)); };// www .j av a 2 s .c om Task task = mock(Task.class); final boolean hasExistingAuthentication = randomBoolean(); final String action = "internal:foo"; if (hasExistingAuthentication) { threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication); threadContext.putHeader(AuthenticationField.AUTHENTICATION_KEY, "foo"); threadContext.putTransient(AuthorizationService.ORIGINATING_ACTION_KEY, "indices:foo"); } else { assertNull(threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY)); } doAnswer((i) -> { ActionListener callback = (ActionListener) i.getArguments()[3]; callback.onResponse(threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY)); return Void.TYPE; }).when(authcService).authenticate(eq(action), eq(request), eq(SystemUser.INSTANCE), any(ActionListener.class)); filter.apply(task, action, request, listener, chain); if (hasExistingAuthentication) { assertEquals(authentication, threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY)); } else { assertNull(threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY)); } assertNotNull(authenticationSetOnce.get()); assertNotEquals(authentication, authenticationSetOnce.get()); assertEquals(SystemUser.INSTANCE, authenticationSetOnce.get().getUser()); }
From source file:org.elasticsearch.xpack.security.audit.index.IndexAuditTrailTests.java
License:Open Source License
private void initialize(final String[] includes, final String[] excludes, final Settings additionalSettings) throws Exception { rollover = randomFrom(HOURLY, DAILY, WEEKLY, MONTHLY); includeRequestBody = randomBoolean(); Settings.Builder builder = Settings.builder(); if (remoteIndexing) { builder.put(remoteSettings);//from ww w. j av a 2 s . com } builder.put(settings(rollover, includes, excludes)).put(additionalSettings).build(); // IndexAuditTrail should ignore secure settings // they are merged on the master node creating the audit index if (randomBoolean()) { MockSecureSettings ignored = new MockSecureSettings(); if (randomBoolean()) { ignored.setString(KeyStoreWrapper.SEED_SETTING.getKey(), "non-empty-secure-settings"); } builder.setSecureSettings(ignored); } Settings settings = builder.build(); logger.info("--> settings: [{}]", settings); DiscoveryNode localNode = mock(DiscoveryNode.class); when(localNode.getHostAddress()).thenReturn(remoteAddress.getAddress()); when(localNode.getHostName()).thenReturn(remoteAddress.getAddress()); ClusterService clusterService = mock(ClusterService.class); ClusterState state = mock(ClusterState.class); DiscoveryNodes nodes = mock(DiscoveryNodes.class); when(clusterService.localNode()).thenReturn(localNode); when(clusterService.state()).thenReturn(client().admin().cluster().prepareState().get().getState()); when(state.getNodes()).thenReturn(nodes); when(nodes.isLocalNodeElectedMaster()).thenReturn(true); threadPool = new TestThreadPool("index audit trail tests"); enqueuedMessage = new SetOnce<>(); auditor = new IndexAuditTrail(settings, client(), threadPool, clusterService) { @Override void enqueue(Message message, String type) { enqueuedMessage.set(message); super.enqueue(message, type); } @Override List<Class<? extends Plugin>> remoteTransportClientPlugins() { return Arrays.asList(LocalStateSecurity.class, getTestTransportPlugin()); } }; auditor.start(); }
From source file:org.elasticsearch.xpack.security.audit.index.IndexAuditTrailTests.java
License:Open Source License
private SearchHit getIndexedAuditMessage(Message message) throws InterruptedException { assertNotNull("no audit message was enqueued", message); final String indexName = IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, message.timestamp, rollover); ensureYellowAndNoInitializingShards(indexName); GetSettingsResponse settingsResponse = getClient().admin().indices().prepareGetSettings(indexName).get(); assertThat(settingsResponse.getSetting(indexName, "index.number_of_shards"), is(Integer.toString(numShards))); assertThat(settingsResponse.getSetting(indexName, "index.number_of_replicas"), is(Integer.toString(numReplicas))); final SetOnce<SearchResponse> searchResponseSetOnce = new SetOnce<>(); final boolean found = awaitBusy(() -> { try {//ww w . jav a 2 s .c om SearchResponse searchResponse = getClient().prepareSearch(indexName) .setTypes(IndexAuditTrail.DOC_TYPE).get(); if (searchResponse.getHits().getTotalHits() > 0L) { searchResponseSetOnce.set(searchResponse); return true; } } catch (Exception e) { logger.debug("caught exception while executing search", e); } return false; }); assertThat("no audit document exists!", found, is(true)); SearchResponse response = searchResponseSetOnce.get(); assertNotNull(response); assertEquals(1, response.getHits().getTotalHits()); return response.getHits().getHits()[0]; }