Example usage for com.google.common.collect Queues synchronizedQueue

Introduction

On this page you can find example usages of com.google.common.collect.Queues#synchronizedQueue.

Prototype

public static <E> Queue<E> synchronizedQueue(Queue<E> queue) 

Document

Returns a synchronized (thread-safe) queue backed by the specified queue.
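
The wrapper serializes each individual method call through a single lock, but iteration is a compound operation: as with Collections.synchronizedCollection, the caller must hold the returned queue's monitor for the whole traversal. A minimal sketch (names are illustrative):

import java.util.ArrayDeque;
import java.util.Queue;
import com.google.common.collect.Queues;

Queue<String> tasks = Queues.synchronizedQueue(new ArrayDeque<String>());
tasks.add("a");             // individual calls are atomic
String head = tasks.poll(); // likewise atomic; returns null if the queue is empty

// Iteration is NOT atomic: synchronize on the returned queue manually.
synchronized (tasks) {
    for (String task : tasks) {
        System.out.println(task);
    }
}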

Usage

From source file: org.embeddedrailroad.eri.layoutio.cmri.CmriPollMachine.java

/**
 *  Create a polling machine that contains a little worker thread to do the actual work.
 *  The port has already been set up for baud rate, stop bits, parity, etc.
 *
 * @param serial serial port object (the channel)
 * @param baudRate baud rate, informational only
 * @param model "Layout Model" that holds unit I/O states
 */
public CmriPollMachine(SerialPort serial, int baudRate, CmriLayoutModelImpl model) {
    this.m_model = model;
    this.m_recovery_rate = CmriSerialLayoutTransport.DEFAULT_DISCOVERY_RATE;
    this.m_port = serial;
    this.m_baud_rate = baudRate;

    this.m_consecutive_missed_polls = new int[CMRI_HIGHEST_POLL_ADDR + 1];

    // ``"Synchronized" [collection] classes can be useful when you need to prevent all access to
    //   a collection via a single lock, at the expense of poorer scalability.''
    this.m_active_queue = Queues.synchronizedQueue(new java.util.LinkedList<Integer>());
    this.m_revive_queue = Queues.synchronizedQueue(new java.util.LinkedList<Integer>());
}
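
A hypothetical drain loop for a queue like m_active_queue above (not from the project): each method call on the wrapper is atomic, but a check-then-act sequence such as isEmpty() followed by remove() is not, so the safe idiom is a single poll() that tolerates null.

// poll() tests for emptiness and removes in one atomic call, leaving no
// window for another thread between the check and the removal.
// handleUnit is a hypothetical per-address handler.
Integer address;
while ((address = m_active_queue.poll()) != null) {
    handleUnit(address);
}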

From source file: org.apache.streams.riak.binary.RiakBinaryPersistReader.java

private Queue<StreamsDatum> constructQueue() {
    return Queues.synchronizedQueue(new LinkedBlockingQueue<StreamsDatum>(10000));
}
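
A design note on this one: LinkedBlockingQueue is already thread-safe, so the wrapper arguably serves mostly to narrow the type to Queue, hiding the blocking put/take methods and leaving callers with the non-blocking offer/poll contract. A sketch under that assumption:

Queue<StreamsDatum> queue = Queues.synchronizedQueue(new LinkedBlockingQueue<StreamsDatum>(10000));
StreamsDatum datum = new StreamsDatum("payload");  // illustrative payload
boolean accepted = queue.offer(datum);  // false once the 10000-element bound is hit
StreamsDatum next = queue.poll();       // null when the queue is empty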

From source file: com.github.rinde.logistics.pdptw.mas.comm.RtSolverBidder.java

RtSolverBidder(ObjectiveFunction objFunc, RealtimeSolver s, BidFunction bidFunc, long cooldown,
        boolean reauctEnabled) {
    super(SetFactories.synchronizedFactory(SetFactories.linkedHashSet()));
    objectiveFunction = objFunc;
    solver = s;
    solverHandle = Optional.absent();
    bidFunction = bidFunc;
    cfbQueue = Queues.synchronizedQueue(new LinkedList<CallForBids>());
    parcelAuctioneers = new LinkedHashMap<>();
    reauctioning = new AtomicBoolean();
    computing = new AtomicBoolean();
    reauctionCooldownPeriod = cooldown;
    reauctionsEnabled = reauctEnabled;
}
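
Where a plain thread-safe FIFO like cfbQueue above is all that is needed, java.util.concurrent.ConcurrentLinkedQueue is a common lock-free alternative to wrapping a LinkedList; a sketch of the swap (not from the project):

// Same non-blocking Queue contract, no wrapper or shared lock required.
// Caveat: size() is O(n) on ConcurrentLinkedQueue, unlike LinkedList.
Queue<CallForBids> cfbQueue = new ConcurrentLinkedQueue<>();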

From source file: org.apache.streams.s3.S3PersistReader.java

@Override
public void prepare(Object configurationObject) {

    lineReaderUtil = LineReadWriteUtil.getInstance(s3ReaderConfiguration);
    // Connect to S3
    synchronized (this) {
        // Create the credentials Object
        AWSCredentials credentials = new BasicAWSCredentials(s3ReaderConfiguration.getKey(),
                s3ReaderConfiguration.getSecretKey());

        ClientConfiguration clientConfig = new ClientConfiguration();
        clientConfig.setProtocol(Protocol.valueOf(s3ReaderConfiguration.getProtocol().toString()));

        // We do not want path style access
        S3ClientOptions clientOptions = new S3ClientOptions();
        clientOptions.setPathStyleAccess(false);

        this.amazonS3Client = new AmazonS3Client(credentials, clientConfig);
        if (StringUtils.isNotEmpty(s3ReaderConfiguration.getRegion())) {
            this.amazonS3Client
                    .setRegion(Region.getRegion(Regions.fromName(s3ReaderConfiguration.getRegion())));
        }
        this.amazonS3Client.setS3ClientOptions(clientOptions);
    }

    final ListObjectsRequest request = new ListObjectsRequest()
            .withBucketName(this.s3ReaderConfiguration.getBucket())
            .withPrefix(s3ReaderConfiguration.getReaderPath()).withMaxKeys(500);

    ObjectListing listing = this.amazonS3Client.listObjects(request);

    this.files = new ArrayList<>();

    /*
     * If you can list files under this path, you must be dealing with a directory;
     * if you cannot, you are most likely dealing with a simple file.
     */
    boolean hasCommonPrefixes = listing.getCommonPrefixes().size() > 0;
    boolean hasObjectSummaries = listing.getObjectSummaries().size() > 0;

    if (hasCommonPrefixes || hasObjectSummaries) {
        // Handle the 'directory' use case
        do {
            if (hasCommonPrefixes) {
                for (String file : listing.getCommonPrefixes()) {
                    this.files.add(file);
                }
            } else {
                for (final S3ObjectSummary objectSummary : listing.getObjectSummaries()) {
                    this.files.add(objectSummary.getKey());
                }
            }

            // get the next batch.
            listing = this.amazonS3Client.listNextBatchOfObjects(listing);
        } while (listing.isTruncated());
    } else {
        // handle the single file use-case
        this.files.add(s3ReaderConfiguration.getReaderPath());
    }

    if (this.files.size() <= 0) {
        LOGGER.error("There are no files to read");
    }

    this.persistQueue = Queues.synchronizedQueue(new LinkedBlockingQueue<StreamsDatum>(10000));
    this.executor = Executors.newSingleThreadExecutor();
}

From source file: org.apache.streams.hdfs.WebHdfsPersistReader.java

@Override
public void prepare(Object configurationObject) {
    LOGGER.debug("Prepare");
    lineReaderUtil = LineReadWriteUtil.getInstance(hdfsConfiguration);
    connectToWebHDFS();
    String pathString = hdfsConfiguration.getPath() + "/" + hdfsConfiguration.getReaderPath();
    LOGGER.info("Path : {}", pathString);
    path = new Path(pathString);
    try {
        if (client.isFile(path)) {
            LOGGER.info("Found File");
            FileStatus fileStatus = client.getFileStatus(path);
            status = new FileStatus[1];
            status[0] = fileStatus;
        } else if (client.isDirectory(path)) {
            status = client.listStatus(path);
            List<FileStatus> statusList = Arrays.asList(status);
            Collections.sort(statusList);
            status = statusList.toArray(new FileStatus[0]);
            LOGGER.info("Found Directory : {} files", status.length);
        } else {
            LOGGER.error("Neither file nor directory, wtf");
        }
    } catch (IOException ex) {
        LOGGER.error("IOException", ex);
    }
    streamsConfiguration = StreamsConfigurator.detectConfiguration();
    persistQueue = Queues.synchronizedQueue(
            new LinkedBlockingQueue<StreamsDatum>(streamsConfiguration.getBatchSize().intValue()));
    executor = Executors.newSingleThreadExecutor();
    mapper = StreamsJacksonMapper.getInstance();
}

From source file: eu.itesla_project.offline.OfflineWorkflowImpl.java

@Override
protected void startImpl(final WorkflowStartContext startContext) throws Exception {
    Network network = loadAndMergeNetwork(creationParameters.getBaseCaseDate(), LOAD_FLOW_PRIORITY);

    // We want to work on multiple samples at the same time, so we are going
    // to use the multi-state feature of the IIDM network model. Each
    // sample is mapped to a state created by cloning the initial state of
    // the network.
    network.getStateManager().allowStateMultiThreadAccess(true);
    network.getStateManager().setWorkingState(StateManager.INITIAL_STATE_ID);

    Networks.printBalanceSummary("snapshot", network, LOGGER);

    try (HistoDbClient histoDbClient = histoDbClientFactory.create();
            TopologyMiner topologyMiner = topologyMinerFactory.create()) {

        ContingenciesAndActionsDatabaseClient cadbClient = cadbClientFactory.create();

        // prepare base case
        TopologyContext topologyContext = prepareBaseCase(network, creationParameters, histoDbClient,
                topologyMiner, computationManager);

        Networks.printBalanceSummary("base case", network, LOGGER);

        LOGGER.info("{} contingencies", cadbClient.getContingencies(network).size());

        //note: if the ~/sampler2wp41.properties file does not exist, a mock sampler is used; otherwise a sampler provided by the matlab-integration module is used
        //      samplers require the MATLAB Compiler Runtime (see readme.txt in matlab-integration)
        Sampler sampler = samplerFactory.create(network, computationManager, SAMPLING_PRIORITY, histoDbClient);

        LOGGER.info("Sampling module: {} {}", sampler.getName(), Objects.toString(sampler.getVersion(), ""));

        Stabilization stabilization = simulatorFactory.createStabilization(network, computationManager,
                STABILIZATION_PRIORITY);
        ImpactAnalysis impactAnalysis = simulatorFactory.createImpactAnalysis(network, computationManager,
                IMPACT_ANALYSIS_PRIORITY, cadbClient);
        Optimizer optimizer = optimizerFactory.create(network, computationManager, STARTING_POINT_INIT_PRIORITY,
                histoDbClient, topologyMiner);
        LoadFlow loadFlow = loadFlowFactory.create(network, computationManager, LOAD_FLOW_PRIORITY);

        LOGGER.info("Starting point init module: {} {}", optimizer.getName(),
                Objects.toString(optimizer.getVersion(), ""));
        LOGGER.info("Load flow module: {} {}", loadFlow.getName(), Objects.toString(loadFlow.getVersion(), ""));
        LOGGER.info("Stabilization module: {} {}", stabilization.getName(),
                Objects.toString(stabilization.getVersion(), ""));
        LOGGER.info("Impact analysis module: {} {}", impactAnalysis.getName(),
                Objects.toString(impactAnalysis.getVersion(), ""));

        int cores = computationManager.getResourcesStatus().getAvailableCores();
        final int stateQueueSize = startContext.getStartParameters().getStateQueueSize() != -1
                ? startContext.getStartParameters().getStateQueueSize()
                : 2;

        LOGGER.trace("State queue initial size: {}", stateQueueSize);

        // module initializations
        sampler.init(new SamplerParameters(creationParameters.getHistoInterval(),
                creationParameters.isGenerationSampled(), creationParameters.isBoundariesSampled()));
        optimizer.init(new OptimizerParameters(creationParameters.getHistoInterval()), topologyContext);
        Map<String, Object> simulationInitContext = new HashMap<>();
        SimulationParameters simulationParameters = SimulationParameters.load();
        LOGGER.info(simulationParameters.toString());
        stabilization.init(simulationParameters, simulationInitContext);
        impactAnalysis.init(simulationParameters, simulationInitContext);

        changeWorkflowStatus(new OfflineWorkflowStatus(id, OfflineWorkflowStep.SAMPLING, creationParameters,
                startContext.getStartParameters()));

        LoadFlowParameters loadFlowParameters = createLoadFlowParameters();
        final WorkflowContext context = new WorkflowContext(network, sampler, optimizer, loadFlow,
                stabilization, impactAnalysis, loadFlowParameters);

        final BlockingDeque<Sample> samples = new ForwardingBlockingDeque<Sample>() {

            private final BlockingDeque<Sample> delegate = new LinkedBlockingDeque<>(
                    startContext.getStartParameters().getSampleQueueSize());

            @Override
            protected BlockingDeque<Sample> delegate() {
                return delegate;
            }

            @Override
            public boolean offer(Sample o) {
                boolean inserted = super.offer(o);
                LOGGER.trace("Sample queue size ++: {}", delegate.size());
                return inserted;
            }

            @Override
            public Sample poll(long timeout, TimeUnit unit) throws InterruptedException {
                Sample sample = super.poll(timeout, unit);
                if (sample != null) {
                    int size = delegate.size();
                    LOGGER.trace("Sample queue size --: {}", size);
                    if (size == 0) {
                        LOGGER.warn("Sample queue is empty");
                    }
                }
                return sample;
            }

        };

        List<Future<?>> sampleFutures = new ArrayList<>();
        final Semaphore samplePermits = new Semaphore(startContext.getStartParameters().getSampleQueueSize(),
                true);

        for (int i = 0; i < startContext.getStartParameters().getSamplingThreads(); i++) {
            sampleFutures.add(executorService.submit(() -> {
                try {
                    // and then we continuously re-sample
                    while (!isStopRequested(startContext)) {
                        // wait for a sample permit
                        while (!isStopRequested(startContext) && !samplePermits.tryAcquire(
                                startContext.getStartParameters().getSamplesPerThread(), TIMEOUT,
                                TimeUnit.MILLISECONDS)) {
                        }

                        if (!isStopRequested(startContext)) {
                            SamplerResult result = runSampling(
                                    startContext.getStartParameters().getSamplesPerThread(), context,
                                    startContext);
                            if (result != null && result.isOk()) {
                                for (Sample sample : result.getSamples()) {
                                    samples.offer(sample);
                                }
                            }
                        }
                    }
                } catch (Throwable e) {
                    LOGGER.error(e.toString(), e);
                }
            }));
        }

        List<Future<?>> stateFutures = new ArrayList<>();
        final Semaphore statePermits = new Semaphore(stateQueueSize, true);
        final Queue<SimulationState> states = Queues
                .synchronizedQueue(new ArrayDeque<SimulationState>(stateQueueSize));
        final Map<SimulationState, Integer> sampleIds = Collections.synchronizedMap(new HashMap<>());

        for (int i = 0; i < stateQueueSize; i++) {
            stateFutures.add(executorService.submit(() -> {
                try {
                    while (!isStopRequested(startContext)) {
                        // wait for a state permit
                        while (!isStopRequested(startContext)
                                && !statePermits.tryAcquire(TIMEOUT, TimeUnit.MILLISECONDS)) {
                        }

                        // wait for a sample to be available
                        Sample sample = null;
                        while (!isStopRequested(startContext)
                                && (sample = samples.poll(TIMEOUT, TimeUnit.MILLISECONDS)) == null) {
                        }

                        if (sample != null) {
                            samplePermits.release();

                            String stateId = "Sample-" + sample.getId();

                            // create a new network state
                            context.getNetwork().getStateManager().cloneState(StateManager.INITIAL_STATE_ID,
                                    stateId);
                            try {
                                // set current thread working state
                                context.getNetwork().getStateManager().setWorkingState(stateId);

                                // apply the sample to the network
                                sample.apply(context.getNetwork());

                                SimulationState state = null;
                                if (!isStopRequested(startContext)
                                        && runStartingPointInit(context, startContext, sample.getId())
                                        && !isStopRequested(startContext)
                                        && runLoadFlow(context, startContext, sample)
                                        && !isStopRequested(startContext) && (state = runStabilization(context,
                                                startContext, sample.getId())) != null) {
                                    sampleIds.put(state, sample.getId());
                                    states.add(state);
                                    LOGGER.trace("State queue size ++: {}", states.size());
                                } else {
                                    statePermits.release();
                                }

                                // in any case, store the sample in the simulation db and the
                                // validation db for later deep analysis
                                storeState(sample.getId(), context.getNetwork());

                                startContext.incrementProcessedSamples();

                                try {
                                    validationDb.save(context.getNetwork(),
                                            OfflineWorkflow.getValidationDir(id), "sample-" + sample.getId());
                                } catch (Exception e) {
                                    LOGGER.error(e.toString(), e);
                                }
                            } finally {
                                context.getNetwork().getStateManager().removeState(stateId);
                            }
                        }
                    }
                } catch (Throwable e) {
                    LOGGER.error(e.toString(), e);
                }
            }));
        }

        ImpactAnalysisController controller = new ImpactAnalysisController() {

            @Override
            public boolean isStopRequested() {
                return OfflineWorkflowImpl.this.isStopRequested(startContext);
            }

            @Override
            public SimulationState nextState() {
                SimulationState state = states.poll();
                if (state != null) {
                    statePermits.release();
                    int size = states.size();
                    LOGGER.trace("State queue size --: {}", size);
                    if (size == 0) {
                        LOGGER.warn("State queue is empty");
                    }
                }
                return state;
            }

        };

        ImpactAnalysisResultCallback callback = new ImpactAnalysisResultCallback() {

            @Override
            public void onStart(SimulationState state) {
                int sampleId = sampleIds.get(state);
                LOGGER.debug("Workflow {}, sample {}: impact analysis started", id, sampleId);
            }

            @Override
            public void onResult(SimulationState state, ImpactAnalysisResult result) {
                try {
                    int sampleId = sampleIds.remove(state);
                    storeImpactAnalysisResults(context, startContext, sampleId, result);
                } catch (Exception e) {
                    LOGGER.error(e.toString(), e);
                }
            }
        };

        try {
            runImpactAnalysis(context, cores, controller, callback, cadbClient);
        } catch (Exception e) {
            LOGGER.error(e.toString(), e);
            stopRequested.set(true);
        }

        for (Future<?> f : stateFutures) {
            f.get();
        }

        samplePermits.release(samples.size());

        for (Future<?> f : sampleFutures) {
            f.get();
        }

        // some offline db implementation may require to explicitly flush data on disk
        offlineDb.flush(id);

        // clean samples synthesis
        sampleSynthesisLock.lock();
        try {
            samplesSynthesis.clear();
        } finally {
            sampleSynthesisLock.unlock();
        }
        notifySampleSynthesisChange();
    }
}
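
Two details of the states queue above are worth spelling out. ArrayDeque is not thread-safe on its own, so the synchronizedQueue wrapper is doing real work here; and ArrayDeque's constructor argument is only an initial capacity hint, not a bound, so the limit of stateQueueSize elements is enforced externally by the statePermits semaphore. A simplified sketch of that pattern (produceState and consume are hypothetical):

Semaphore permits = new Semaphore(stateQueueSize, true);
Queue<SimulationState> states = Queues.synchronizedQueue(new ArrayDeque<SimulationState>(stateQueueSize));

// producer side: a permit bounds the number of states in flight
permits.acquire();  // may throw InterruptedException
states.add(produceState());

// consumer side: releasing the permit lets another producer proceed
SimulationState state = states.poll();
if (state != null) {
    permits.release();
    consume(state);
}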