List of usage examples for java.util.concurrent.atomic AtomicReference set
public final void set(V newValue)
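set(V) stores newValue unconditionally, with volatile-write semantics: there is no CAS loop, the last writer wins, and the new value is immediately visible to any thread that subsequently calls get(). A minimal sketch of that contract (the class and values are illustrative):

import java.util.concurrent.atomic.AtomicReference;

public class SetBasics {
    public static void main(String[] args) throws InterruptedException {
        AtomicReference<String> ref = new AtomicReference<>("initial");

        // set() is a plain volatile write: no compare-and-set, no retry loop.
        Thread writer = new Thread(() -> ref.set("updated"));
        writer.start();
        writer.join();

        // The write happens-before any subsequent get(), so this prints "updated".
        System.out.println(ref.get());
    }
}

The examples below show the recurring reasons real code reaches for set(): handing values out of lambdas and callbacks, out-parameters, and publishing results across threads.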
From source file:org.apache.solr.cloud.ZkController.java
private void checkStateInZk(CoreDescriptor cd) throws InterruptedException {
    if (!Overseer.isLegacy(zkStateReader)) {
        CloudDescriptor cloudDesc = cd.getCloudDescriptor();
        String coreNodeName = cloudDesc.getCoreNodeName();
        if (coreNodeName == null)
            throw new SolrException(ErrorCode.SERVER_ERROR, "No coreNodeName for " + cd);
        if (cloudDesc.getShardId() == null) {
            throw new SolrException(ErrorCode.SERVER_ERROR, "No shard id for " + cd);
        }
        AtomicReference<String> errorMessage = new AtomicReference<>();
        AtomicReference<DocCollection> collectionState = new AtomicReference<>();
        try {
            zkStateReader.waitForState(cd.getCollectionName(), 3, TimeUnit.SECONDS, (n, c) -> {
                collectionState.set(c);
                if (c == null)
                    return false;
                Slice slice = c.getSlice(cloudDesc.getShardId());
                if (slice == null) {
                    errorMessage.set("Invalid shard: " + cloudDesc.getShardId());
                    return false;
                }
                Replica replica = slice.getReplica(coreNodeName);
                if (replica == null) {
                    errorMessage.set("coreNodeName " + coreNodeName + " does not exist in shard "
                            + cloudDesc.getShardId());
                    return false;
                }
                String baseUrl = replica.getStr(BASE_URL_PROP);
                String coreName = replica.getStr(CORE_NAME_PROP);
                if (baseUrl.equals(this.baseURL) && coreName.equals(cd.getName())) {
                    return true;
                }
                errorMessage.set("coreNodeName " + coreNodeName
                        + " exists, but does not match expected node or core name");
                return false;
            });
        } catch (TimeoutException e) {
            String error = errorMessage.get();
            if (error == null)
                error = "Replica " + coreNodeName + " is not present in cluster state";
            throw new SolrException(ErrorCode.SERVER_ERROR, error + ": " + collectionState.get());
        }
    }
}
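The pattern in this example: the predicate lambda passed to waitForState() can only return a boolean, so errorMessage.set(...) and collectionState.set(...) smuggle the *reason* for failure out to the timeout handler. A stripped-down sketch of the same idea (waitUntil, its predicate, and the sample values are illustrative stand-ins, not Solr API):

import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;

public class PredicateDiagnostics {
    // Hypothetical stand-in for an API like zkStateReader.waitForState():
    // evaluates the predicate and throws TimeoutException if it never passes.
    static <T> void waitUntil(T state, Predicate<T> check) throws TimeoutException {
        if (!check.test(state)) {
            throw new TimeoutException();
        }
    }

    public static void main(String[] args) {
        AtomicReference<String> errorMessage = new AtomicReference<>();
        try {
            waitUntil("shard2", s -> {
                if (!"shard1".equals(s)) {
                    // The lambda can only return false; set() preserves the reason.
                    errorMessage.set("Invalid shard: " + s);
                    return false;
                }
                return true;
            });
        } catch (TimeoutException e) {
            System.err.println("Wait failed: " + errorMessage.get());
        }
    }
}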
From source file:edu.rit.flick.genetics.FastFileInflator.java
@Override
public synchronized File inflate(final Configuration configuration, final File fileIn, final File fileOut) {
    assert fileIn.exists();
    try {
        // Inflate to Directory
        final String outputDirectoryPath = fileOut.getPath()
                .replaceAll("." + Files.getFileExtension(fileOut.getPath()), FLICK_FAST_FILE_TMP_DIR_SUFFIX);
        final File tmpOutputDirectory = new File(outputDirectoryPath);
        if (tmpOutputDirectory.exists())
            FileUtils.deleteDirectory(tmpOutputDirectory);
        final AtomicReference<Thread> cleanHookAtomic = new AtomicReference<Thread>();
        final Thread inflateToDirectoryThread = new Thread(() -> {
            try {
                // Inflate Fast file to a temporary directory
                inflateFromFile(fileIn, tmpOutputDirectory);
                // Inflate Directory to a zip file
                inflateFromDirectory(tmpOutputDirectory, fileOut);
                // Clean up IO
                close();
                System.gc();
                Thread.sleep(100);
                // Clean up temporary directory
                FileUtils.deleteDirectory(tmpOutputDirectory);
                Runtime.getRuntime().removeShutdownHook(cleanHookAtomic.get());
            } catch (final Exception e) {
                if (!interrupted)
                    System.err.println(e.getMessage());
            }
        }, "Default_Inflation_Thread");
        // Make cleaning hook
        final Thread cleanHook = new Thread(() -> {
            interrupted = true;
            configuration.setFlag(VERBOSE_FLAG, false);
            configuration.setFlag(DELETE_FLAG, false);
            try {
                if (inflateToDirectoryThread.isAlive())
                    inflateToDirectoryThread.interrupt();
                // Clean up IO
                close();
                System.gc();
                Thread.sleep(100);
                synchronized (this) {
                    while (inflateToDirectoryThread.isAlive())
                        this.wait();
                }
            } catch (final IOException | InterruptedException e) {
                e.printStackTrace();
            } finally {
                // Clean up temporary directory
                FileUtils.deleteQuietly(tmpOutputDirectory);
                // Clean up INCOMPLETE output file
                FileUtils.deleteQuietly(fileOut);
                System.out.println();
            }
        }, "Inflation_Cleaning_Thread");
        cleanHookAtomic.set(cleanHook);
        Runtime.getRuntime().addShutdownHook(cleanHook);
        inflateToDirectoryThread.start();
        inflateToDirectoryThread.join();
    } catch (final IOException | InterruptedException e) {
        e.printStackTrace();
    }
    return fileOut;
}
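Here set() resolves a forward reference: the worker thread's lambda must deregister a shutdown hook that does not exist yet when the lambda is defined, so the lambda captures an empty AtomicReference that is filled in later with cleanHookAtomic.set(cleanHook). A minimal sketch of that late-binding trick (names are illustrative):

import java.util.concurrent.atomic.AtomicReference;

public class LateBinding {
    public static void main(String[] args) throws InterruptedException {
        // The worker lambda needs the hook, but the hook is created afterwards,
        // so the lambda captures an initially empty holder instead.
        AtomicReference<Thread> hookRef = new AtomicReference<>();

        Thread worker = new Thread(() -> {
            // By the time the worker runs, hookRef has been populated.
            Runtime.getRuntime().removeShutdownHook(hookRef.get());
            System.out.println("Finished normally; shutdown hook deregistered.");
        });

        Thread cleanupHook = new Thread(() -> System.out.println("Interrupted; cleaning up."));
        hookRef.set(cleanupHook);                      // resolve the forward reference
        Runtime.getRuntime().addShutdownHook(cleanupHook);

        worker.start();
        worker.join();
    }
}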
From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.WebServiceLayerLocalWorkspaces.java
/**
 * This method supports {@link LocalDataAccessLayer} when it needs to pend
 * property changes while reconciling a local workspace. The normal
 * pendChanges() method would start another reconcile.
 */
public GetOperation[] pendChangesInLocalWorkspace(final String workspaceName, final String ownerName,
        final ChangeRequest[] changes, final PendChangesOptions pendChangesOptions,
        final SupportedFeatures supportedFeatures, final AtomicReference<Failure[]> failures,
        final String[] itemPropertyFilters, final String[] itemAttributeFilters,
        final AtomicBoolean onlineOperation, final AtomicReference<ChangePendedFlags> changePendedFlags) {
    final _Repository4Soap_PendChangesInLocalWorkspaceResponse response;
    try {
        response = getRepository4().pendChangesInLocalWorkspace(workspaceName, ownerName,
                (_ChangeRequest[]) WrapperUtils.unwrap(_ChangeRequest.class, changes),
                pendChangesOptions.toIntFlags(), supportedFeatures.toIntFlags(), itemPropertyFilters,
                itemAttributeFilters);
    } catch (final ProxyException e) {
        throw VersionControlExceptionMapper.map(e);
    }

    final GetOperation[] toReturn = (GetOperation[]) WrapperUtils.wrap(GetOperation.class,
            response.getPendChangesInLocalWorkspaceResult());

    failures.set((Failure[]) WrapperUtils.wrap(Failure.class, response.getFailures()));
    changePendedFlags.set(new ChangePendedFlags(response.getChangePendedFlags()));
    onlineOperation.set(true);

    return toReturn;
}
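A Java method returns a single value, so this API uses AtomicReference (and AtomicBoolean) parameters as out-parameters: the caller passes empty holders, and the callee fills them with set() alongside the real return value. A condensed sketch of the convention (types and names are illustrative):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

public class OutParameters {
    // Returns the primary result; secondary results are written into the holders.
    static int[] divide(int a, int b, AtomicReference<String> failure, AtomicBoolean exact) {
        if (b == 0) {
            failure.set("division by zero");
            exact.set(false);
            return new int[0];
        }
        failure.set(null);
        exact.set(a % b == 0);
        return new int[] { a / b, a % b };
    }

    public static void main(String[] args) {
        AtomicReference<String> failure = new AtomicReference<>();
        AtomicBoolean exact = new AtomicBoolean();
        int[] qr = divide(7, 2, failure, exact);
        System.out.println("q=" + qr[0] + " r=" + qr[1] + " exact=" + exact.get()
                + " failure=" + failure.get());
    }
}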
From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java
/**
 * Ensures that the committed offsets to Kafka are the offsets of "the next record to process".
 */
public void runCommitOffsetsToKafka() throws Exception {
    // 3 partitions with 50 records each (0-49, so the expected commit offset of each partition should be 50)
    final int parallelism = 3;
    final int recordsInEachPartition = 50;

    final String topicName = writeSequence("testCommitOffsetsToKafkaTopic", recordsInEachPartition,
            parallelism, 1);

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost",
            flinkPort);
    env.getConfig().disableSysoutLogging();
    env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
    env.setParallelism(parallelism);
    env.enableCheckpointing(200);

    DataStream<String> stream = env
            .addSource(kafkaServer.getConsumer(topicName, new SimpleStringSchema(), standardProps));
    stream.addSink(new DiscardingSink<String>());

    final AtomicReference<Throwable> errorRef = new AtomicReference<>();
    final Thread runner = new Thread("runner") {
        @Override
        public void run() {
            try {
                env.execute();
            } catch (Throwable t) {
                if (!(t.getCause() instanceof JobCancellationException)) {
                    errorRef.set(t);
                }
            }
        }
    };
    runner.start();

    final Long l50 = 50L; // the final committed offset in Kafka should be 50
    final long deadline = 30_000_000_000L + System.nanoTime();

    KafkaTestEnvironment.KafkaOffsetHandler kafkaOffsetHandler = kafkaServer.createOffsetHandler();

    do {
        Long o1 = kafkaOffsetHandler.getCommittedOffset(topicName, 0);
        Long o2 = kafkaOffsetHandler.getCommittedOffset(topicName, 1);
        Long o3 = kafkaOffsetHandler.getCommittedOffset(topicName, 2);

        if (l50.equals(o1) && l50.equals(o2) && l50.equals(o3)) {
            break;
        }
        Thread.sleep(100);
    } while (System.nanoTime() < deadline);

    // cancel the job
    JobManagerCommunicationUtils.cancelCurrentJob(flink.getLeaderGateway(timeout));

    final Throwable t = errorRef.get();
    if (t != null) {
        throw new RuntimeException("Job failed with an exception", t);
    }

    // final check to see if offsets are correctly in Kafka
    Long o1 = kafkaOffsetHandler.getCommittedOffset(topicName, 0);
    Long o2 = kafkaOffsetHandler.getCommittedOffset(topicName, 1);
    Long o3 = kafkaOffsetHandler.getCommittedOffset(topicName, 2);
    Assert.assertEquals(Long.valueOf(50L), o1);
    Assert.assertEquals(Long.valueOf(50L), o2);
    Assert.assertEquals(Long.valueOf(50L), o3);

    kafkaOffsetHandler.close();
    deleteTestTopic(topicName);
}
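An exception thrown inside another thread's run() never propagates to the thread that spawned it, so this test (and the next two Flink examples) store the failure in errorRef and rethrow it on the main thread after the job is done. The pattern in a minimal, self-contained form:

import java.util.concurrent.atomic.AtomicReference;

public class BackgroundError {
    public static void main(String[] args) throws Exception {
        AtomicReference<Throwable> errorRef = new AtomicReference<>();

        Thread runner = new Thread(() -> {
            try {
                throw new IllegalStateException("job failed");
            } catch (Throwable t) {
                errorRef.set(t); // hand the failure back to the main thread
            }
        }, "runner");

        runner.start();
        runner.join();

        // Rethrow on the calling thread so the failure is not silently lost.
        Throwable t = errorRef.get();
        if (t != null) {
            throw new RuntimeException("Background job failed", t);
        }
    }
}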
From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java
/**
 * This test ensures that when the consumers retrieve some start offset from Kafka (earliest, latest),
 * this offset is committed to Kafka, even if some partitions are not read.
 *
 * Test:
 * - Create 3 partitions
 * - Write 50 messages into each.
 * - Start three consumers with auto.offset.reset='latest' and wait until they committed into Kafka.
 * - Check if the offsets in Kafka are set to 50 for the three partitions.
 *
 * See FLINK-3440 as well.
 */
public void runAutoOffsetRetrievalAndCommitToKafka() throws Exception {
    // 3 partitions with 50 records each (0-49, so the expected commit offset of each partition should be 50)
    final int parallelism = 3;
    final int recordsInEachPartition = 50;

    final String topicName = writeSequence("testAutoOffsetRetrievalAndCommitToKafkaTopic",
            recordsInEachPartition, parallelism, 1);

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost",
            flinkPort);
    env.getConfig().disableSysoutLogging();
    env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
    env.setParallelism(parallelism);
    env.enableCheckpointing(200);

    Properties readProps = new Properties();
    readProps.putAll(standardProps);
    readProps.setProperty("auto.offset.reset", "latest"); // reset to latest, so that partitions are initially not read

    DataStream<String> stream = env
            .addSource(kafkaServer.getConsumer(topicName, new SimpleStringSchema(), readProps));
    stream.addSink(new DiscardingSink<String>());

    final AtomicReference<Throwable> errorRef = new AtomicReference<>();
    final Thread runner = new Thread("runner") {
        @Override
        public void run() {
            try {
                env.execute();
            } catch (Throwable t) {
                if (!(t.getCause() instanceof JobCancellationException)) {
                    errorRef.set(t);
                }
            }
        }
    };
    runner.start();

    KafkaTestEnvironment.KafkaOffsetHandler kafkaOffsetHandler = kafkaServer.createOffsetHandler();

    final Long l50 = 50L; // the final committed offset in Kafka should be 50
    final long deadline = 30_000_000_000L + System.nanoTime();
    do {
        Long o1 = kafkaOffsetHandler.getCommittedOffset(topicName, 0);
        Long o2 = kafkaOffsetHandler.getCommittedOffset(topicName, 1);
        Long o3 = kafkaOffsetHandler.getCommittedOffset(topicName, 2);

        if (l50.equals(o1) && l50.equals(o2) && l50.equals(o3)) {
            break;
        }
        Thread.sleep(100);
    } while (System.nanoTime() < deadline);

    // cancel the job
    JobManagerCommunicationUtils.cancelCurrentJob(flink.getLeaderGateway(timeout));

    final Throwable t = errorRef.get();
    if (t != null) {
        throw new RuntimeException("Job failed with an exception", t);
    }

    // final check to see if offsets are correctly in Kafka
    Long o1 = kafkaOffsetHandler.getCommittedOffset(topicName, 0);
    Long o2 = kafkaOffsetHandler.getCommittedOffset(topicName, 1);
    Long o3 = kafkaOffsetHandler.getCommittedOffset(topicName, 2);
    Assert.assertEquals(Long.valueOf(50L), o1);
    Assert.assertEquals(Long.valueOf(50L), o2);
    Assert.assertEquals(Long.valueOf(50L), o3);

    kafkaOffsetHandler.close();
    deleteTestTopic(topicName);
}
From source file:com.example.app.profile.ui.company.CompanyValueEditor.java
@Override
public void init() {
    VTCropPictureEditorConfig webLogoConfig = _companyConfig.companyWebLogoConfig();
    _webLogoEditor = new VTCropPictureEditor(webLogoConfig);
    _webLogoEditor.addClassName("company-web-logo");
    _webLogoEditor.setDefaultResource(_appUtil.getDefaultResourceImage());
    VTCropPictureEditorConfig emailLogoConfig = _companyConfig.companyEmailLogoConfig();
    _emailLogoEditor = new VTCropPictureEditor(emailLogoConfig);
    _emailLogoEditor.addClassName("company-web-logo");
    _emailLogoEditor.setDefaultResource(_appUtil.getDefaultResourceImage());

    super.init();

    Label webLogoInstructions = new Label(createText(INSTRUCTIONS_PICTURE_EDITOR_FMT(),
            webLogoConfig.getCropWidth(), webLogoConfig.getCropHeight()));
    webLogoInstructions.addClassName(CSS_INSTRUCTIONS);
    webLogoInstructions.withHTMLElement(HTMLElement.div);
    Label emailLogoInstructions = new Label(createText(INSTRUCTIONS_PICTURE_EDITOR_FMT(),
            emailLogoConfig.getCropWidth(), emailLogoConfig.getCropHeight()));
    emailLogoInstructions.addClassName(CSS_INSTRUCTIONS);
    emailLogoInstructions.withHTMLElement(HTMLElement.div);

    add(of("logos", of("prop", LABEL_WEB_LOGO(), _webLogoEditor, webLogoInstructions),
            of("prop", LABEL_EMAIL_LOGO(), _emailLogoEditor, emailLogoInstructions)));

    CommonEditorFields.addNameEditor(this);
    addEditorForProperty(() -> {
        final CompositeValueEditor<Location> editor = new CompositeValueEditor<>(Location.class);
        editor.addEditorForProperty(() -> {
            AddressValueEditorConfig cfg = new AddressValueEditorConfig();
            return new AddressValueEditor(cfg);
        }, Location.ADDRESS_PROP);
        //        editor.addEditorForProperty(() -> {
        //            EmailAddressValueEditorConfig cfg = new EmailAddressValueEditorConfig();
        //            return new EmailAddressValueEditor(cfg);
        //        }, Location.EMAIL_ADDRESS_PROP);
        editor.addEditorForProperty(() -> {
            PhoneNumberValueEditorConfig cfg = new PhoneNumberValueEditorConfig();
            return new PhoneNumberValueEditor(cfg);
        }, Location.PHONE_NUMBER_PROP);
        return editor;
    }, Company.PRIMARY_LOCATION_PROP);
    addEditorForProperty(() -> {
        final URLEditor editor = new URLEditor(LABEL_WEBSITE(), null);
        editor.addClassName("website");
        return editor;
    }, ce -> stringToURL(ce.getWebsiteLink(), null), (ce, url) -> ce.setWebsiteLink(urlToString(url)));

    if (_editMode == AbstractCompanyPropertyEditor.EditMode.StandardCompany) {
        final String superdomainName = _appUtil.getSite().getDefaultHostname().getName();
        final AtomicReference<TextEditor> domainNameEditor = new AtomicReference<>();
        final AtomicReference<Container> inputInstructionsRef = new AtomicReference<>();
        final AtomicReference<Container> customDomainInstructionsRef = new AtomicReference<>();
        final AtomicReference<Label> superdomainLabelRef = new AtomicReference<>();
        Function<String, String> convertDomainUIValue = val -> {
            if (!StringFactory.isEmptyString(val)) {
                String converted = val;
                if (converted.endsWith('.' + superdomainName))
                    converted = converted.replace('.' + superdomainName, "");
                converted = HOSTNAME_VALIDITY_PATTERN1.matcher(converted).replaceAll("-");
                converted = HOSTNAME_VALIDITY_PATTERN2.matcher(converted).replaceAll("").toLowerCase();
                return converted;
            }
            return val;
        };
        addEditorForProperty(() -> {
            final TextEditor editor = new TextEditor(LABEL_SUB_DOMAIN(), null);
            final Container inputInstructions = _uiHelper
                    .createInputInstructions(INSTRUCTIONS_SUB_DOMAIN(_terms.company()));
            final Container customDomainInstructions = _uiHelper
                    .createInputInstructions(INSTRUCTIONS_CUSTOM_DOMAIN(_terms.company()));
            final Label superdomainNameLabel = new Label(createText('.' + superdomainName), span,
                    "super-domain-name");
            editor.moveToTop(customDomainInstructions);
            editor.moveToTop(inputInstructions);
            editor.moveToTop(editor.getLabel());
            editor.add(editor.getValueComponent());
            editor.add(superdomainNameLabel);
            editor.getValueComponent().addPropertyChangeListener(Field.PROP_TEXT, evt -> {
                if (editor.isEditable()) {
                    String uiValue = editor.getValueComponent().getText();
                    editor.getValueComponent().setText(convertDomainUIValue.apply(uiValue));
                }
            });
            editor.setRequiredValueValidator();
            domainNameEditor.set(editor);
            inputInstructionsRef.set(inputInstructions);
            customDomainInstructionsRef.set(customDomainInstructions);
            superdomainLabelRef.set(superdomainNameLabel);
            return editor;
        }, ce -> {
            final TextEditor editor = domainNameEditor.get();
            String cehostname = ce.getHostname().getName();
            if (cehostname == null)
                cehostname = "";
            domainNameEditor.get().setEditable(
                    !(!StringFactory.isEmptyString(cehostname) && !cehostname.endsWith('.' + superdomainName)));
            if (editor.isEditable()) {
                cehostname = convertDomainUIValue.apply(cehostname.replace('.' + superdomainName, ""));
            }
            inputInstructionsRef.get().setVisible(editor.isEditable());
            superdomainLabelRef.get().setVisible(editor.isEditable());
            customDomainInstructionsRef.get().setVisible(!editor.isEditable());
            return cehostname;
        }, (ce, value) -> {
            if (domainNameEditor.get().isEditable())
                ce.getHostname().setName(String.join(".", value, superdomainName));
            else
                ce.getHostname().setName(ce.getHostname().getName());
        });
    }

    addEditorForProperty(() -> {
        final URLEditor editor = new URLEditor(LABEL_LINKEDIN(), null);
        editor.addClassName("linkedin");
        return editor;
    }, company -> stringToURL(company.getLinkedInLink(), null),
            (company, url) -> company.setLinkedInLink(urlToString(url)));
    addEditorForProperty(() -> {
        final URLEditor editor = new URLEditor(LABEL_TWITTER(), null);
        editor.addClassName("twitter");
        return editor;
    }, company -> stringToURL(company.getTwitterLink(), null),
            (company, url) -> company.setTwitterLink(urlToString(url)));
    addEditorForProperty(() -> {
        final URLEditor editor = new URLEditor(LABEL_FACEBOOK(), null);
        editor.addClassName("facebook");
        return editor;
    }, company -> stringToURL(company.getFacebookLink(), null),
            (company, url) -> company.setFacebookLink(urlToString(url)));
}
From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java
protected String writeSequence(String baseTopicName, final int numElements, final int parallelism,
        final int replicationFactor) throws Exception {
    LOG.info("\n===================================\n" + "== Writing sequence of " + numElements + " into "
            + baseTopicName + " with p=" + parallelism + "\n" + "===================================");

    final TypeInformation<Tuple2<Integer, Integer>> resultType = TypeInformation
            .of(new TypeHint<Tuple2<Integer, Integer>>() {
            });

    final KeyedSerializationSchema<Tuple2<Integer, Integer>> serSchema = new KeyedSerializationSchemaWrapper<>(
            new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig()));

    final KeyedDeserializationSchema<Tuple2<Integer, Integer>> deserSchema = new KeyedDeserializationSchemaWrapper<>(
            new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig()));

    final int maxNumAttempts = 10;

    for (int attempt = 1; attempt <= maxNumAttempts; attempt++) {
        final String topicName = baseTopicName + '-' + attempt;
        LOG.info("Writing attempt #" + attempt);

        // -------- Write the Sequence --------

        createTestTopic(topicName, parallelism, replicationFactor);

        StreamExecutionEnvironment writeEnv = StreamExecutionEnvironment.createRemoteEnvironment("localhost",
                flinkPort);
        writeEnv.getConfig().setRestartStrategy(RestartStrategies.noRestart());
        writeEnv.getConfig().disableSysoutLogging();

        DataStream<Tuple2<Integer, Integer>> stream = writeEnv
                .addSource(new RichParallelSourceFunction<Tuple2<Integer, Integer>>() {

                    private boolean running = true;

                    @Override
                    public void run(SourceContext<Tuple2<Integer, Integer>> ctx) throws Exception {
                        int cnt = 0;
                        int partition = getRuntimeContext().getIndexOfThisSubtask();

                        while (running && cnt < numElements) {
                            ctx.collect(new Tuple2<>(partition, cnt));
                            cnt++;
                        }
                    }

                    @Override
                    public void cancel() {
                        running = false;
                    }
                }).setParallelism(parallelism);

        // the producer must not produce duplicates
        Properties producerProperties = FlinkKafkaProducerBase
                .getPropertiesFromBrokerList(brokerConnectionStrings);
        producerProperties.setProperty("retries", "0");
        producerProperties.putAll(secureProps);

        kafkaServer.produceIntoKafka(stream, topicName, serSchema, producerProperties,
                new Tuple2Partitioner(parallelism)).setParallelism(parallelism);

        try {
            writeEnv.execute("Write sequence");
        } catch (Exception e) {
            LOG.error("Write attempt failed, trying again", e);
            deleteTestTopic(topicName);
            JobManagerCommunicationUtils.waitUntilNoJobIsRunning(flink.getLeaderGateway(timeout));
            continue;
        }

        LOG.info("Finished writing sequence");

        // -------- Validate the Sequence --------

        // we need to validate the sequence, because kafka's producers are not exactly once
        LOG.info("Validating sequence");

        JobManagerCommunicationUtils.waitUntilNoJobIsRunning(flink.getLeaderGateway(timeout));

        final StreamExecutionEnvironment readEnv = StreamExecutionEnvironment
                .createRemoteEnvironment("localhost", flinkPort);
        readEnv.getConfig().setRestartStrategy(RestartStrategies.noRestart());
        readEnv.getConfig().disableSysoutLogging();
        readEnv.setParallelism(parallelism);

        Properties readProps = (Properties) standardProps.clone();
        readProps.setProperty("group.id", "flink-tests-validator");
        readProps.putAll(secureProps);
        FlinkKafkaConsumerBase<Tuple2<Integer, Integer>> consumer = kafkaServer.getConsumer(topicName,
                deserSchema, readProps);

        readEnv.addSource(consumer)
                .map(new RichMapFunction<Tuple2<Integer, Integer>, Tuple2<Integer, Integer>>() {

                    private final int totalCount = parallelism * numElements;
                    private int count = 0;

                    @Override
                    public Tuple2<Integer, Integer> map(Tuple2<Integer, Integer> value) throws Exception {
                        if (++count == totalCount) {
                            throw new SuccessException();
                        } else {
                            return value;
                        }
                    }
                }).setParallelism(1).addSink(new DiscardingSink<Tuple2<Integer, Integer>>()).setParallelism(1);

        final AtomicReference<Throwable> errorRef = new AtomicReference<>();

        Thread runner = new Thread() {
            @Override
            public void run() {
                try {
                    tryExecute(readEnv, "sequence validation");
                } catch (Throwable t) {
                    errorRef.set(t);
                }
            }
        };
        runner.start();

        final long deadline = System.nanoTime() + 10_000_000_000L;
        long delay;
        while (runner.isAlive() && (delay = deadline - System.nanoTime()) > 0) {
            runner.join(delay / 1_000_000L);
        }

        boolean success;

        if (runner.isAlive()) {
            // did not finish in time, maybe the producer dropped one or more records and
            // the validation did not reach the exit point
            success = false;
            JobManagerCommunicationUtils.cancelCurrentJob(flink.getLeaderGateway(timeout));
        } else {
            Throwable error = errorRef.get();
            if (error != null) {
                success = false;
                LOG.info("Attempt " + attempt + " failed with exception", error);
            } else {
                success = true;
            }
        }

        JobManagerCommunicationUtils.waitUntilNoJobIsRunning(flink.getLeaderGateway(timeout));

        if (success) {
            // everything is good!
            return topicName;
        } else {
            deleteTestTopic(topicName);
            // fall through the loop
        }
    }

    throw new Exception("Could not write a valid sequence to Kafka after " + maxNumAttempts + " attempts");
}
From source file:com.alibaba.wasp.master.FMaster.java
/**
 * @param tableName
 * @param rowKey
 * @return the entity group info and server name for the row
 * @throws java.io.IOException
 */
public Pair<EntityGroupInfo, ServerName> getTableEntityGroupForRow(final byte[] tableName, final byte[] rowKey)
        throws IOException {
    final AtomicReference<Pair<EntityGroupInfo, ServerName>> result = new AtomicReference<Pair<EntityGroupInfo, ServerName>>(
            null);

    MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
        @Override
        public boolean processRow(Result data) throws IOException {
            if (data == null || data.size() <= 0) {
                return true;
            }
            Pair<EntityGroupInfo, ServerName> pair = EntityGroupInfo.getEntityGroupInfoAndServerName(data);
            if (pair == null) {
                return false;
            }
            if (!Bytes.equals(pair.getFirst().getTableName(), tableName)) {
                return false;
            }
            result.set(pair);
            return true;
        }
    };

    FMetaScanner.metaScan(conf, visitor, tableName, rowKey, 1);
    return result.get();
}
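processRow() is a callback driven by the scanner, so it cannot return the matching pair directly; result.set(pair) captures it for the enclosing method to return once the scan finishes. A generic sketch of extracting a value from a visitor-style API (the scan helper and data are illustrative):

import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;

public class VisitorResult {
    // Hypothetical visitor-style API: calls the visitor until it returns false.
    static void scan(List<String> rows, Predicate<String> visitor) {
        for (String row : rows) {
            if (!visitor.test(row)) {
                break;
            }
        }
    }

    static String findFirstMatch(List<String> rows, String prefix) {
        AtomicReference<String> result = new AtomicReference<>();
        scan(rows, row -> {
            if (row.startsWith(prefix)) {
                result.set(row);  // capture the hit for the enclosing method
                return false;     // stop scanning
            }
            return true;          // keep going
        });
        return result.get();
    }

    public static void main(String[] args) {
        System.out.println(findFirstMatch(List.of("alpha", "beta", "bravo"), "br"));
    }
}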
From source file:no.barentswatch.fiskinfo.BaseActivity.java
/**
 * Attempts to authenticate the given credentials with BarentsWatch. Will
 * set userIsAuthenticated to true if authentication is successful.
 *
 * @param username
 *            the username to use for authentication
 * @param password
 *            the password to use for authentication
 */
// TODO: Change from hardcoded variables to using the actual username and
// password
public void authenticateUserCredentials(final String username, final String password) throws Exception {
    final AtomicReference<String> responseAsString = new AtomicReference<String>();

    Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                System.out.println("user: " + username + ", " + password);
                CloseableHttpClient httpclient = HttpClients.createDefault();
                try {
                    HttpPost httpPost = new HttpPost("https://www.barentswatch.no/api/token");
                    httpPost.addHeader(HTTP.CONTENT_TYPE, "application/x-www-form-urlencoded");

                    List<NameValuePair> postParameters = new ArrayList<NameValuePair>();
                    postParameters.add(new BasicNameValuePair("grant_type", "password"));
                    postParameters.add(new BasicNameValuePair("username", username));
                    postParameters.add(new BasicNameValuePair("password", password));

                    httpPost.setEntity(new UrlEncodedFormEntity(postParameters));
                    CloseableHttpResponse response = httpclient.execute(httpPost);
                    try {
                        responseAsString.set(EntityUtils.toString(response.getEntity()));
                    } finally {
                        response.close();
                    }
                } finally {
                    httpclient.close();
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    });

    thread.start();
    try {
        thread.join();
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    String barentswatchResponse = responseAsString.get();
    JSONObject barentsWatchResponseToken = new JSONObject(barentswatchResponse);
    saveUserCredentialsToSharedPreferences(barentsWatchResponseToken, username, password);
    getAuthenticationCredientialsFromSharedPrefrences();
    setAuthentication(true);
    loadView(MyPageActivity.class);
}
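The HTTP call here must run off the UI thread (Android forbids network I/O on the main thread), and responseAsString carries the response body back after join(). Note that join() alone already guarantees visibility; the AtomicReference is needed because a local variable cannot be assigned from inside the anonymous Runnable. The same start/join/get handoff in a self-contained sketch; outside Android, an ExecutorService with Future.get() usually expresses this more directly:

import java.util.concurrent.atomic.AtomicReference;

public class WorkerHandoff {
    public static void main(String[] args) throws InterruptedException {
        AtomicReference<String> responseRef = new AtomicReference<>();

        Thread worker = new Thread(() -> {
            // Stand-in for the blocking HTTP call.
            responseRef.set("{\"access_token\":\"...\"}");
        });

        worker.start();
        worker.join(); // join() establishes happens-before, so get() sees the write

        System.out.println("response: " + responseRef.get());
    }
}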
From source file:com.microsoft.gittf.client.clc.commands.framework.Command.java
private TFSTeamProjectCollection getConnection(final URI serverURI,
        final AtomicReference<Credentials> credentials) throws Exception {
    Check.notNull(serverURI, "serverURI"); //$NON-NLS-1$
    Check.notNull(credentials, "credentials"); //$NON-NLS-1$

    if (connection == null) {
        getProgressMonitor().displayMessage(Messages.getString("Command.ConnectingToTFS")); //$NON-NLS-1$

        boolean authenticated = false, isHostedServer = false;
        int connectionTryCount = 0;
        while (!authenticated) {
            connectionTryCount++;

            connection = new TFSTeamProjectCollection(serverURI, credentials.get(),
                    new GitTFConnectionAdvisor());

            try {
                connection.ensureAuthenticated();
                authenticated = true;
            } catch (TECoreException e) {
                if (e.getCause() != null && e.getCause() instanceof EndpointNotFoundException) {
                    throw new Exception(Messages.formatString("Command.InvalidServerMissingCollectionFormat", //$NON-NLS-1$
                            serverURI.toString()), e);
                }

                if (connectionTryCount > 3) {
                    if (isHostedServer) {
                        throw new Exception(Messages.formatString("Command.FailedToConnectToHostedFormat", //$NON-NLS-1$
                                serverURI.toString()), e);
                    }

                    throw e;
                }

                if (e instanceof ACSUnauthorizedException || e instanceof TFSFederatedAuthException
                        || (e.getCause() != null && (e.getCause() instanceof AuthenticationException
                                || e.getCause() instanceof UnauthorizedException))) {
                    if (connectionTryCount == 1) {
                        isHostedServer = e instanceof TFSFederatedAuthException;
                    }

                    Credentials newCredentials = promptForCredentials(connection.getCredentials());

                    if (newCredentials == null) {
                        throw e;
                    }

                    credentials.set(newCredentials);
                } else {
                    throw e;
                }
            }
        }
    }

    return connection;
}
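Unlike the earlier out-parameter example, credentials here is an in-out parameter: the method reads the caller's initial value with get() and writes any re-prompted credentials back with set(), so the caller keeps whichever credentials finally worked for later calls. A distilled sketch (the accepted value, retry limit, and prompt inputs are illustrative):

import java.util.concurrent.atomic.AtomicReference;

public class InOutParameter {
    // Hypothetical login that only accepts "secret". Reads the caller's value,
    // retries with new input, and writes the working value back via set().
    static void connect(AtomicReference<String> credentials, String... prompts) throws Exception {
        int attempt = 0;
        while (!"secret".equals(credentials.get())) {
            if (attempt >= prompts.length) {
                throw new Exception("authentication failed");
            }
            credentials.set(prompts[attempt++]); // the caller sees the updated value too
        }
    }

    public static void main(String[] args) throws Exception {
        AtomicReference<String> credentials = new AtomicReference<>("wrong");
        connect(credentials, "stillWrong", "secret");
        // The caller can reuse the credentials that actually worked.
        System.out.println("authenticated with: " + credentials.get());
    }
}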