Example usage for java.util.concurrent.atomic AtomicBoolean get

List of usage examples for java.util.concurrent.atomic AtomicBoolean get

Introduction

On this page you can find example usage for java.util.concurrent.atomic AtomicBoolean get.

Prototype

public final boolean get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
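
As a quick illustration before the full examples below, here is a minimal, self-contained sketch (not taken from any of the source files on this page; the class name AtomicBooleanGetExample is made up) showing how get() can poll a flag that another thread publishes with set():

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanGetExample {
    public static void main(String[] args) throws InterruptedException {
        // Flag shared between threads; reads and writes have volatile memory semantics.
        final AtomicBoolean ready = new AtomicBoolean(false);

        Thread worker = new Thread(() -> {
            // Spin until the main thread flips the flag; get() observes the update
            // because it reads with VarHandle#getVolatile memory effects.
            while (!ready.get()) {
                Thread.yield();
            }
            System.out.println("worker observed ready == " + ready.get());
        });

        worker.start();
        ready.set(true); // publish the flag; the worker's next get() sees it
        worker.join();
    }
}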

Usage

From source file:com.netflix.conductor.core.execution.TestWorkflowExecutor.java

@Test
public void test() throws Exception {

    AtomicBoolean httpTaskExecuted = new AtomicBoolean(false);
    AtomicBoolean http2TaskExecuted = new AtomicBoolean(false);

    new Wait();
    new WorkflowSystemTask("HTTP") {
        @Override
        public boolean isAsync() {
            return true;
        }

        @Override
        public void start(Workflow workflow, Task task, WorkflowExecutor executor) throws Exception {
            httpTaskExecuted.set(true);
            task.setStatus(Status.COMPLETED);
            super.start(workflow, task, executor);
        }

    };

    new WorkflowSystemTask("HTTP2") {

        @Override
        public void start(Workflow workflow, Task task, WorkflowExecutor executor) throws Exception {
            http2TaskExecuted.set(true);
            task.setStatus(Status.COMPLETED);
            super.start(workflow, task, executor);
        }

    };

    Workflow workflow = new Workflow();
    workflow.setWorkflowId("1");

    TestConfiguration config = new TestConfiguration();
    MetadataDAO metadata = mock(MetadataDAO.class);
    ExecutionDAO edao = mock(ExecutionDAO.class);
    QueueDAO queue = mock(QueueDAO.class);
    ObjectMapper om = new ObjectMapper();

    WorkflowExecutor executor = new WorkflowExecutor(metadata, edao, queue, om, config);
    List<Task> tasks = new LinkedList<>();

    WorkflowTask taskToSchedule = new WorkflowTask();
    taskToSchedule.setWorkflowTaskType(Type.USER_DEFINED);
    taskToSchedule.setType("HTTP");

    WorkflowTask taskToSchedule2 = new WorkflowTask();
    taskToSchedule2.setWorkflowTaskType(Type.USER_DEFINED);
    taskToSchedule2.setType("HTTP2");

    WorkflowTask wait = new WorkflowTask();
    wait.setWorkflowTaskType(Type.WAIT);
    wait.setType("WAIT");
    wait.setTaskReferenceName("wait");

    Task task1 = SystemTask.userDefined(workflow, IDGenerator.generate(), taskToSchedule, new HashMap<>(), null,
            0);
    Task task2 = SystemTask.waitTask(workflow, IDGenerator.generate(), taskToSchedule, new HashMap<>());
    Task task3 = SystemTask.userDefined(workflow, IDGenerator.generate(), taskToSchedule2, new HashMap<>(),
            null, 0);

    tasks.add(task1);
    tasks.add(task2);
    tasks.add(task3);

    when(edao.createTasks(tasks)).thenReturn(tasks);
    AtomicInteger startedTaskCount = new AtomicInteger(0);
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            startedTaskCount.incrementAndGet();
            return null;
        }
    }).when(edao).updateTask(any());

    AtomicInteger queuedTaskCount = new AtomicInteger(0);
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            String queueName = invocation.getArgumentAt(0, String.class);
            System.out.println(queueName);
            queuedTaskCount.incrementAndGet();
            return null;
        }
    }).when(queue).push(any(), any(), anyInt());

    boolean stateChanged = executor.scheduleTask(workflow, tasks);
    assertEquals(2, startedTaskCount.get());
    assertEquals(1, queuedTaskCount.get());
    assertTrue(stateChanged);
    assertFalse(httpTaskExecuted.get());
    assertTrue(http2TaskExecuted.get());
}

From source file:com.github.jackygurui.vertxredissonrepository.repository.Impl.RedisRepositoryImpl.java

private void persistBlocking(String id, JsonObject data, RBatch redissonBatch,
        Handler<AsyncResult<Boolean>> resultHandler) {
    RBatch batch = redissonBatch == null ? redissonWrite.createBatch() : redissonBatch;
    AtomicBoolean failed = new AtomicBoolean(false);
    try {
        BeanMap pMap = new BeanMap(cls.newInstance());
        //remove the indexes;
        if (isRedisEntity()) {
            AtomicBoolean finished = new AtomicBoolean(false);
            AtomicBoolean hasNested = new AtomicBoolean(false);
            AtomicLong stack = new AtomicLong();
            pMap.forEach((k, v) -> {
                if ("class".equals(k)) {
                    return;
                }
                Class<?> type = pMap.getType((String) k);
                if (!isRedisEntity(type)) {
                    //recreate the indexes;
                    if ("id".equals(k)) {
                        batch.getMap(getStorageKey(), StringCodec.INSTANCE).fastPutAsync(id, id);
                    } else {
                        batch.getMap(getStorageKey((String) k)).fastPutAsync(id, data.getValue((String) k));
                    }
                } else {
                    hasNested.set(true);
                    stack.incrementAndGet();
                    RedisRepositoryImpl<?> innerRepo;
                    try {
                        innerRepo = (RedisRepositoryImpl) factory.instance(type);
                    } catch (RepositoryException e) {
                        throw new RuntimeException(e);
                    }
                    JsonObject value = data.getJsonObject((String) k);
                    final boolean newOne = !value.containsKey("id") || value.getString("id") == null
                            || "null".equals(value.getString("id"));
                    final String ID = newOne ? id : value.getString("id");
                    // Make the nested entity share the same id as the parent when it's a 1:1 relation.
                    // This makes fetch a lot faster since it doesn't need to resolve the reference
                    // when fetching 1:1 nested objects.
                    innerRepo.persist(ID, value, batch, c -> {
                        if (c.succeeded()) {
                            long s = stack.decrementAndGet();
                            if (newOne) {
                                // Unlike update, create needs to add the reference field to the batch.
                                batch.getMap(getStorageKey((String) k)).fastPutAsync(id, ID);
                            }
                            if (s == 0 && finished.get() && !failed.get()) { //finished iterating and no outstanding processes. 
                                if (redissonBatch == null) {//if it's not inside a nested process.
                                    finishPersist(id, data, batch, resultHandler);
                                } else {//if it is inside a nested process.
                                    resultHandler.handle(Future.succeededFuture(true));
                                }
                            }
                            //else wait for others to complete
                        } else {
                            boolean firstToFail = failed.compareAndSet(false, true);
                            if (firstToFail) {
                                resultHandler.handle(Future.failedFuture(c.cause()));
                            }
                        }
                    });
                }
            });
            batch.getAtomicLongAsync(getCounterKey()).incrementAndGetAsync();
            finished.set(true);
            if (!hasNested.get()) {//does not have nested RedissonEntity within
                if (redissonBatch == null) {//if it's not inside a nested process.
                    finishPersist(id, data, batch, resultHandler);
                } else {//if it is inside a nested process.
                    resultHandler.handle(Future.succeededFuture(true));
                }
            }
        } else {//not a RedissonEntity class, persist as json string.
            //recreate the indexes;
            batch.<String, String>getMap(getStorageKey(), StringCodec.INSTANCE).fastPutAsync(id,
                    Json.encode(data));
            batch.getAtomicLongAsync(getCounterKey()).incrementAndGetAsync();
            if (redissonBatch == null) {//if it's not inside a nested process.
                finishPersist(id, data, batch, resultHandler);
            } else {//if it is inside a nested process.
                resultHandler.handle(Future.succeededFuture(true));
            }
        }
    } catch (InstantiationException | IllegalAccessException | RuntimeException ex) {
        failed.set(true);
        resultHandler.handle(Future.failedFuture(ex));
    }
}

From source file:com.microsoft.alm.plugin.idea.tfvc.ui.checkout.TfvcCheckoutModel.java

@Override
public void doCheckout(final Project project, final CheckoutProvider.Listener listener,
        final ServerContext context, final VirtualFile destinationParent, final String directoryName,
        final String parentDirectory, final boolean isAdvancedChecked) {
    final String workspaceName = directoryName;
    final String teamProjectName = getRepositoryName(context);
    final String localPath = Path.combine(parentDirectory, directoryName);
    final AtomicBoolean checkoutResult = new AtomicBoolean();
    (new Task.Backgroundable(project,
            TfPluginBundle.message(TfPluginBundle.KEY_CHECKOUT_TFVC_CREATING_WORKSPACE), true,
            PerformInBackgroundOption.DEAF) {
        public void run(@NotNull final ProgressIndicator indicator) {
            IdeaHelper.setProgress(indicator, 0.10,
                    TfPluginBundle.message(TfPluginBundle.KEY_CHECKOUT_TFVC_PROGRESS_CREATING));

            try {
                // Create the workspace with default values
                final CreateWorkspaceCommand command = new CreateWorkspaceCommand(context, workspaceName,
                        TfPluginBundle.message(TfPluginBundle.KEY_CHECKOUT_TFVC_WORKSPACE_COMMENT), null, null);
                command.runSynchronously();
            } catch (final WorkspaceAlreadyExistsException e) {
                logger.warn("Error creating workspace: "
                        + LocalizationServiceImpl.getInstance().getExceptionMessage(e));
                // TODO: allow user to change name in the flow instead of starting over
                IdeaHelper.runOnUIThread(new Runnable() {
                    @Override
                    public void run() {
                        Messages.showErrorDialog(project,
                                LocalizationServiceImpl.getInstance().getExceptionMessage(e),
                                TfPluginBundle.message(TfPluginBundle.KEY_CHECKOUT_TFVC_FAILED_TITLE));
                    }
                });

                // returning since the workspace failed to create so we can't proceed with the next steps
                return;
            }

            IdeaHelper.setProgress(indicator, 0.20,
                    TfPluginBundle.message(TfPluginBundle.KEY_CHECKOUT_TFVC_PROGRESS_ADD_ROOT));

            // Map the project root to the local folder
            final String serverPath = VcsHelper.TFVC_ROOT + teamProjectName;
            final UpdateWorkspaceMappingCommand mappingCommand = new UpdateWorkspaceMappingCommand(context,
                    workspaceName, new Workspace.Mapping(serverPath, localPath, false), false);
            mappingCommand.runSynchronously();

            IdeaHelper.setProgress(indicator, 0.30,
                    TfPluginBundle.message(TfPluginBundle.KEY_CHECKOUT_TFVC_PROGRESS_CREATE_FOLDER));

            // Ensure that the local folder exists
            final File file = new File(localPath);
            if (!file.mkdirs()) {
                //TODO should we throw here?
            }

            // if advanced is set, then sync just some of the files (those that we need for IntelliJ)
            // Otherwise, sync all the files for the team project
            if (!isAdvancedChecked) {
                IdeaHelper.setProgress(indicator, 0.50,
                        TfPluginBundle.message(TfPluginBundle.KEY_CHECKOUT_TFVC_PROGRESS_SYNC));
                // Sync all files recursively
                CommandUtils.syncWorkspace(context, localPath);
            }

            IdeaHelper.setProgress(indicator, 1.00, "", true);

            // No exception means that it was successful
            checkoutResult.set(true);
        }

        public void onSuccess() {
            if (checkoutResult.get()) {
                // Check the isAdvanced flag
                if (isAdvancedChecked) {
                    // The user wants to edit the workspace before syncing...
                    final RepositoryContext repositoryContext = RepositoryContext.createTfvcContext(localPath,
                            workspaceName, teamProjectName, context.getServerUri().toString());
                    final WorkspaceController controller = new WorkspaceController(project, repositoryContext,
                            workspaceName);
                    if (controller.showModalDialog(false)) {
                        // Save and Sync the workspace (this will be backgrounded)
                        controller.saveWorkspace(localPath, true, new Runnable() {
                            @Override
                            public void run() {
                                // Files are all synchronized, so trigger the VCS update
                                UpdateVersionControlSystem(project, parentDirectory, directoryName,
                                        destinationParent, listener);
                            }
                        });
                    }
                } else {
                    // We don't have to wait for the workspace to be updated, so just trigger the VCS update
                    UpdateVersionControlSystem(project, parentDirectory, directoryName, destinationParent,
                            listener);
                }
            }
        }
    }).queue();
}

From source file:org.apache.hadoop.hdfs.TestBlockReaderFactory.java

/**
 * When an InterruptedException is sent to a thread calling
 * FileChannel#read, the FileChannel is immediately closed and the
 * thread gets an exception.  This effectively means that we might have
 * someone asynchronously calling close() on the file descriptors we use
 * in BlockReaderLocal.  So when unreferencing a ShortCircuitReplica in
 * ShortCircuitCache#unref, we should check if the FileChannel objects
 * are still open.  If not, we should purge the replica to avoid giving
 * it out to any future readers.
 *
 * This is a regression test for HDFS-6227: Short circuit read failed
 * due to ClosedChannelException.
 *
 * Note that you may still get ClosedChannelException errors if two threads
 * are reading from the same replica and an InterruptedException is delivered
 * to one of them.
 */
@Test(timeout = 120000)
public void testPurgingClosedReplicas() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicInteger replicasCreated = new AtomicInteger(0);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            replicasCreated.incrementAndGet();
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testPurgingClosedReplicas", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4095;
    final int SEED = 0xFADE0;
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), conf);
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    final Semaphore sem = new Semaphore(0);
    final List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer()
            .getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
    final LocatedBlock lblock = locatedBlocks.get(0); // first block
    final byte[] buf = new byte[TEST_FILE_LEN];
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                while (true) {
                    BlockReader blockReader = null;
                    try {
                        blockReader = BlockReaderTestUtil.getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
                        sem.release();
                        try {
                            blockReader.readAll(buf, 0, TEST_FILE_LEN);
                        } finally {
                            sem.acquireUninterruptibly();
                        }
                    } catch (ClosedByInterruptException e) {
                        LOG.info("got the expected ClosedByInterruptException", e);
                        sem.release();
                        break;
                    } finally {
                        if (blockReader != null)
                            blockReader.close();
                    }
                    LOG.info("read another " + TEST_FILE_LEN + " bytes.");
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
                sem.release();
            }
        }
    };
    Thread thread = new Thread(readerRunnable);
    thread.start();
    // While the thread is reading, send it interrupts.
    // These should trigger a ClosedChannelException.
    while (thread.isAlive()) {
        sem.acquireUninterruptibly();
        thread.interrupt();
        sem.release();
    }
    Assert.assertFalse(testFailed.get());
    // We should be able to read from the file without
    // getting a ClosedChannelException.
    BlockReader blockReader = null;
    try {
        blockReader = BlockReaderTestUtil.getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
        blockReader.readFully(buf, 0, TEST_FILE_LEN);
    } finally {
        if (blockReader != null)
            blockReader.close();
    }
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(buf, expected));
    // Another ShortCircuitReplica object should have been created.
    Assert.assertEquals(2, replicasCreated.get());
    dfs.close();
    cluster.shutdown();
    sockDir.close();
}

From source file:com.qubole.quark.planner.parser.SqlQueryParser.java

public SqlQueryParserResult parse(String sql) throws SQLException {
    DataSourceSchema dataSource = this.context.getDefaultDataSource();
    final AtomicBoolean foundNonQuarkScan = new AtomicBoolean(false);
    final ImmutableSet.Builder<DataSourceSchema> dsBuilder = new ImmutableSet.Builder<>();
    try {
        final SqlKind kind = getSqlParser(sql).parseQuery().getKind();
        SqlQueryParserResult result = new SqlQueryParserResult(stripNamespace(sql, dataSource), dataSource,
                kind, null, false);
        RelNode relNode = parseInternal(sql);
        final RelVisitor relVisitor = new RelVisitor() {
            @Override
            public void visit(RelNode node, int ordinal, RelNode parent) {
                if (node instanceof QuarkViewScan) {
                    visitQuarkViewScan((QuarkViewScan) node);
                } else if (node instanceof QuarkTileScan) {
                    visitQuarkTileScan((QuarkTileScan) node);
                } else if (node instanceof TableScan) {
                    visitNonQuarkScan((TableScan) node);
                }
                super.visit(node, ordinal, parent);
            }

            private void visitNonQuarkScan(TableScan node) {
                foundNonQuarkScan.set(true);
                final String schemaName = node.getTable().getQualifiedName().get(0);
                CalciteSchema schema = CalciteSchema.from(getRootSchma()).getSubSchema(schemaName, false);
                dsBuilder.addAll(getDrivers(schema));
            }

            private void visitQuarkTileScan(QuarkTileScan node) {
                QuarkTile quarkTile = node.getQuarkTile();
                CalciteCatalogReader calciteCatalogReader = new CalciteCatalogReader(
                        CalciteSchema.from(getRootSchma()), false, context.getDefaultSchemaPath(),
                        getTypeFactory());
                CalciteSchema tileSchema = calciteCatalogReader.getTable(quarkTile.tableName)
                        .unwrap(CalciteSchema.class);
                dsBuilder.addAll(getDrivers(tileSchema));
            }

            private void visitQuarkViewScan(QuarkViewScan node) {
                QuarkTable table = node.getQuarkTable();
                if (table instanceof QuarkViewTable) {
                    final CalciteSchema tableSchema = ((QuarkViewTable) table).getBackupTableSchema();
                    dsBuilder.addAll(getDrivers(tableSchema));
                }
            }

            private ImmutableSet<DataSourceSchema> getDrivers(CalciteSchema tableSchema) {
                final ImmutableSet.Builder<DataSourceSchema> dsBuilder = new ImmutableSet.Builder<>();
                SchemaPlus tableSchemaPlus = tableSchema.plus();
                while (tableSchemaPlus != null) {
                    Schema schema = CalciteSchema.from(tableSchemaPlus).schema;
                    if (schema instanceof DataSourceSchema) {
                        dsBuilder.add((DataSourceSchema) schema);
                    }
                    tableSchemaPlus = tableSchemaPlus.getParentSchema();
                }
                return dsBuilder.build();
            }

        };

        relVisitor.go(relNode);

        ImmutableSet<DataSourceSchema> dataSources = dsBuilder.build();

        if (!foundNonQuarkScan.get() && dataSources.size() == 1) {
            /**
             * Check if query is completely optimized for a data source
             */
            final DataSourceSchema newDataSource = dataSources.asList().get(0);
            final SqlDialect dialect = newDataSource.getDataSource().getSqlDialect();
            final String parsedSql = getParsedSql(relNode, dialect);
            result = new SqlQueryParserResult(parsedSql, newDataSource, kind, relNode, true);
        } else if (foundNonQuarkScan.get() && dataSources.size() == 1) {
            /**
             * Check if its not optimized
             */
            final DataSourceSchema newDataSource = dataSources.asList().get(0);
            final String stripNamespace = stripNamespace(sql, newDataSource);
            result = new SqlQueryParserResult(stripNamespace, newDataSource, kind, relNode, true);
        } else if (this.context.isUnitTestMode()) {
            String parsedSql = getParsedSql(relNode,
                    new SqlDialect(SqlDialect.DatabaseProduct.UNKNOWN, "UNKNOWN", null, true));
            result = new SqlQueryParserResult(parsedSql, null, kind, relNode, true);
        } else if (dataSources.size() > 1) {
            /**
             * Check if it's partially optimized, i.e., table scans of multiple
             * data sources are found in the RelNode. We currently do not support
             * multiple data sources.
             */
            throw new SQLException("Federation between data sources is not allowed", "0A001");
        } else if (dataSources.isEmpty()) {
            throw new SQLException("No dataSource found for query", "3D001");
        }
        return result;
    } catch (SQLException e) {
        throw e;
    } catch (Exception e) {
        throw new SQLException(e);
    }
}

From source file:org.apache.hadoop.hdfs.client.impl.TestBlockReaderFactory.java

/**
 * When an InterruptedException is sent to a thread calling
 * FileChannel#read, the FileChannel is immediately closed and the
 * thread gets an exception.  This effectively means that we might have
 * someone asynchronously calling close() on the file descriptors we use
 * in BlockReaderLocal.  So when unreferencing a ShortCircuitReplica in
 * ShortCircuitCache#unref, we should check if the FileChannel objects
 * are still open.  If not, we should purge the replica to avoid giving
 * it out to any future readers.
 *
 * This is a regression test for HDFS-6227: Short circuit read failed
 * due to ClosedChannelException.
 *
 * Note that you may still get ClosedChannelException errors if two threads
 * are reading from the same replica and an InterruptedException is delivered
 * to one of them.
 */
@Test(timeout = 120000)
public void testPurgingClosedReplicas() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicInteger replicasCreated = new AtomicInteger(0);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            replicasCreated.incrementAndGet();
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testPurgingClosedReplicas", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4095;
    final int SEED = 0xFADE0;
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), conf);
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);

    final Semaphore sem = new Semaphore(0);
    final List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer()
            .getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
    final LocatedBlock lblock = locatedBlocks.get(0); // first block
    final byte[] buf = new byte[TEST_FILE_LEN];
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                while (true) {
                    BlockReader blockReader = null;
                    try {
                        blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0,
                                TEST_FILE_LEN);
                        sem.release();
                        try {
                            blockReader.readAll(buf, 0, TEST_FILE_LEN);
                        } finally {
                            sem.acquireUninterruptibly();
                        }
                    } catch (ClosedByInterruptException e) {
                        LOG.info("got the expected ClosedByInterruptException", e);
                        sem.release();
                        break;
                    } finally {
                        if (blockReader != null)
                            blockReader.close();
                    }
                    LOG.info("read another " + TEST_FILE_LEN + " bytes.");
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
                sem.release();
            }
        }
    };
    Thread thread = new Thread(readerRunnable);
    thread.start();

    // While the thread is reading, send it interrupts.
    // These should trigger a ClosedChannelException.
    while (thread.isAlive()) {
        sem.acquireUninterruptibly();
        thread.interrupt();
        sem.release();
    }
    Assert.assertFalse(testFailed.get());

    // We should be able to read from the file without
    // getting a ClosedChannelException.
    BlockReader blockReader = null;
    try {
        blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
        blockReader.readFully(buf, 0, TEST_FILE_LEN);
    } finally {
        if (blockReader != null)
            blockReader.close();
    }
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(buf, expected));

    // Another ShortCircuitReplica object should have been created.
    Assert.assertEquals(2, replicasCreated.get());

    dfs.close();
    cluster.shutdown();
    sockDir.close();
}

From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.localworkspace.LocalDataAccessLayer.java

public static boolean reconcileLocalWorkspace(final Workspace workspace, final WebServiceLayer webServiceLayer,
        final boolean unscannedReconcile, final boolean reconcileMissingFromDisk,
        final AtomicReference<Failure[]> failures, final AtomicBoolean pendingChangesUpdatedByServer) {
    int previousProjectRevisionId = -1;
    boolean processedProjectRenames = false;

    while (true) {
        try {
            final boolean reconciled = reconcileLocalWorkspaceHelper(workspace, webServiceLayer,
                    unscannedReconcile, reconcileMissingFromDisk, failures, pendingChangesUpdatedByServer);

            pendingChangesUpdatedByServer.set(pendingChangesUpdatedByServer.get() || processedProjectRenames);

            return reconciled;
        } catch (final ReconcileBlockedByProjectRenameException renameEx) {
            // Did we just ACK a new project revision ID, but the server sent us
            // another instruction to move to the same revision ID we just moved to?
            // Rather than loop forever, we throw in this case.
            if (previousProjectRevisionId >= 0
                    && previousProjectRevisionId == renameEx.getNewProjectRevisionId()) {
                final Failure failure = new Failure(MessageFormat.format(
                        Messages.getString("LocalDataAccessLayer.RepeatedProjectRevisionIdFormat"), //$NON-NLS-1$
                        renameEx.getNewProjectRevisionId()), null, SeverityType.ERROR, null);

                throw new ReconcileFailedException(new Failure[] { failure });
            }

            final List<KeyValuePair<String, String>> projectRenames = new ArrayList<KeyValuePair<String, String>>();

            for (int i = 0; i < renameEx.getOldProjectNames().length; i++) {
                projectRenames.add(new KeyValuePair<String, String>(renameEx.getOldProjectNames()[i],
                        renameEx.getNewProjectNames()[i]));
            }

            ProcessProjectRenames(workspace, webServiceLayer, projectRenames,
                    renameEx.getNewProjectRevisionId());

            previousProjectRevisionId = renameEx.getNewProjectRevisionId();
            processedProjectRenames = true;

            continue;
        }
    }

    /*
     * Unreachable code in C#
     */
    // throw new ReconcileFailedException(new Failure[]
    // {
    // new Failure("Could not drain all renames", null, SeverityType.ERROR,
    // null) //$NON-NLS-1$
    // });
}

From source file:org.apache.tinkerpop.gremlin.structure.IoTest.java

@Test
@FeatureRequirement(featureClass = Graph.Features.EdgeFeatures.class, feature = Graph.Features.EdgeFeatures.FEATURE_ADD_EDGES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = EdgePropertyFeatures.class, feature = EdgePropertyFeatures.FEATURE_DOUBLE_VALUES)
public void shouldReadWriteDetachedEdgeAsReferenceToGryo() throws Exception {
    final Vertex v1 = g.addVertex(T.label, "person");
    final Vertex v2 = g.addVertex(T.label, "person");
    final Edge e = DetachedFactory.detach(v1.addEdge("friend", v2, "weight", 0.5d, "acl", "rw"), false);

    try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        final GryoWriter writer = g.io().gryoWriter().create();
        writer.writeEdge(os, e);

        final AtomicBoolean called = new AtomicBoolean(false);
        final GryoReader reader = g.io().gryoReader().workingDirectory(File.separator + "tmp").create();
        try (final ByteArrayInputStream bais = new ByteArrayInputStream(os.toByteArray())) {
            reader.readEdge(bais, detachedEdge -> {
                assertEquals(e.id(), detachedEdge.id());
                assertEquals(v1.id(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().id());
                assertEquals(v2.id(), detachedEdge.iterators().vertexIterator(Direction.IN).next().id());
                assertEquals(v1.label(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().label());
                assertEquals(v2.label(), detachedEdge.iterators().vertexIterator(Direction.IN).next().label());
                assertEquals(e.label(), detachedEdge.label());
                assertEquals(e.keys().size(),
                        StreamFactory.stream(detachedEdge.iterators().propertyIterator()).count());
                called.set(true);

                return null;
            });
        }

        assertTrue(called.get());
    }
}

From source file:org.apache.tinkerpop.gremlin.structure.IoTest.java

@Test
@FeatureRequirement(featureClass = Graph.Features.EdgeFeatures.class, feature = Graph.Features.EdgeFeatures.FEATURE_ADD_EDGES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = EdgePropertyFeatures.class, feature = EdgePropertyFeatures.FEATURE_DOUBLE_VALUES)
public void shouldReadWriteEdgeToGryo() throws Exception {
    final Vertex v1 = g.addVertex(T.label, "person");
    final Vertex v2 = g.addVertex(T.label, "person");
    final Edge e = v1.addEdge("friend", v2, "weight", 0.5d, "acl", "rw");

    try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        final GryoWriter writer = g.io().gryoWriter().create();
        writer.writeEdge(os, e);

        final AtomicBoolean called = new AtomicBoolean(false);
        final GryoReader reader = g.io().gryoReader().workingDirectory(File.separator + "tmp").create();
        try (final ByteArrayInputStream bais = new ByteArrayInputStream(os.toByteArray())) {
            reader.readEdge(bais, detachedEdge -> {
                assertEquals(e.id(), detachedEdge.id());
                assertEquals(v1.id(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().id());
                assertEquals(v2.id(), detachedEdge.iterators().vertexIterator(Direction.IN).next().id());
                assertEquals(v1.label(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().label());
                assertEquals(v2.label(), detachedEdge.iterators().vertexIterator(Direction.IN).next().label());
                assertEquals(e.label(), detachedEdge.label());
                assertEquals(0.5d, e.iterators().propertyIterator("weight").next().value());
                assertEquals("rw", e.iterators().propertyIterator("acl").next().value());
                called.set(true);
                return null;
            });
        }

        assertTrue(called.get());
    }
}

From source file:org.apache.tinkerpop.gremlin.structure.IoTest.java

@Test
@FeatureRequirement(featureClass = Graph.Features.EdgeFeatures.class, feature = Graph.Features.EdgeFeatures.FEATURE_ADD_EDGES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = EdgePropertyFeatures.class, feature = EdgePropertyFeatures.FEATURE_DOUBLE_VALUES)
public void shouldReadWriteDetachedEdgeToGryo() throws Exception {
    final Vertex v1 = g.addVertex(T.label, "person");
    final Vertex v2 = g.addVertex(T.label, "person");
    final Edge e = DetachedFactory.detach(v1.addEdge("friend", v2, "weight", 0.5d, "acl", "rw"), true);

    try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        final GryoWriter writer = g.io().gryoWriter().create();
        writer.writeEdge(os, e);

        final AtomicBoolean called = new AtomicBoolean(false);
        final GryoReader reader = g.io().gryoReader().workingDirectory(File.separator + "tmp").create();
        try (final ByteArrayInputStream bais = new ByteArrayInputStream(os.toByteArray())) {
            reader.readEdge(bais, detachedEdge -> {
                assertEquals(e.id(), detachedEdge.id());
                assertEquals(v1.id(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().id());
                assertEquals(v2.id(), detachedEdge.iterators().vertexIterator(Direction.IN).next().id());
                assertEquals(v1.label(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().label());
                assertEquals(v2.label(), detachedEdge.iterators().vertexIterator(Direction.IN).next().label());
                assertEquals(e.label(), detachedEdge.label());
                assertEquals(0.5d, detachedEdge.iterators().propertyIterator("weight").next().value());
                assertEquals("rw", detachedEdge.iterators().propertyIterator("acl").next().value());
                called.set(true);
                return null;
            });
        }

        assertTrue(called.get());
    }
}