Example usage for java.util.concurrent.atomic.AtomicInteger(int)

Introduction

This page collects example usages of the java.util.concurrent.atomic.AtomicInteger(int) constructor, drawn from open-source projects.

Prototype

public AtomicInteger(int initialValue) 

Document

Creates a new AtomicInteger with the given initial value.
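
Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below; all names are illustrative) showing the constructor together with the kind of atomic updates the examples rely on:

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerDemo {
    public static void main(String[] args) throws InterruptedException {
        // Create a counter with an explicit initial value.
        final AtomicInteger counter = new AtomicInteger(0);

        // Ten threads increment concurrently; incrementAndGet() is an
        // atomic read-modify-write, so no update is lost.
        Thread[] threads = new Thread[10];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(new Runnable() {
                @Override
                public void run() {
                    for (int j = 0; j < 1000; j++) {
                        counter.incrementAndGet();
                    }
                }
            });
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }

        System.out.println(counter.get()); // always prints 10000
    }
}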

Usage

From source file:org.lol.reddit.reddit.api.RedditAPIIndividualSubredditDataRequester.java

public void performRequest(final Collection<String> subredditCanonicalIds, final TimestampBound timestampBound,
        final RequestResponseHandler<HashMap<String, RedditSubreddit>, SubredditRequestFailure> handler) {

    // TODO if there's a bulk API to do this, that would be good... :)

    final HashMap<String, RedditSubreddit> result = new HashMap<String, RedditSubreddit>();
    final AtomicBoolean stillOkay = new AtomicBoolean(true);
    final AtomicInteger requestsToGo = new AtomicInteger(subredditCanonicalIds.size());
    final AtomicLong oldestResult = new AtomicLong(Long.MAX_VALUE);

    final RequestResponseHandler<RedditSubreddit, SubredditRequestFailure> innerHandler = new RequestResponseHandler<RedditSubreddit, SubredditRequestFailure>() {
        @Override
        public void onRequestFailed(SubredditRequestFailure failureReason) {
            synchronized (result) {
                if (stillOkay.get()) {
                    stillOkay.set(false);
                    handler.onRequestFailed(failureReason);
                }
            }
        }

        @Override
        public void onRequestSuccess(RedditSubreddit innerResult, long timeCached) {
            synchronized (result) {
                if (stillOkay.get()) {

                    result.put(innerResult.getKey(), innerResult);
                    oldestResult.set(Math.min(oldestResult.get(), timeCached));

                    if (requestsToGo.decrementAndGet() == 0) {
                        handler.onRequestSuccess(result, oldestResult.get());
                    }
                }
            }
        }
    };

    for (String subredditCanonicalId : subredditCanonicalIds) {
        performRequest(subredditCanonicalId, timestampBound, innerHandler);
    }
}
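
In this example the AtomicInteger is a completion countdown: it starts at the number of per-subreddit requests, each successful response decrements it, and the request that brings it to zero hands the aggregated map to the outer handler.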

From source file:dk.statsbiblioteket.util.JobControllerTest.java

public void testRemoveCallback() throws Exception {
    final int JOBS = 10;
    final AtomicInteger counter = new AtomicInteger(0);
    JobController<Long> controller = new JobController<Long>(10) {
        @Override
        protected void afterExecute(Future<Long> finished) {
            counter.incrementAndGet();
        }
    };
    for (int i = 0; i < JOBS; i++) {
        controller.submit(new Shout(10));
    }
    // Crude 100 ms pause: nothing ever notifies this monitor, so wait() just times out
    synchronized (Thread.currentThread()) {
        Thread.currentThread().wait(100);
    }
    assertEquals("The number of pops should match", JOBS, controller.popAll().size());
    assertEquals("The number of callbacks should match", JOBS, counter.get());
}
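
Here the AtomicInteger tallies afterExecute callbacks from the JobController, and the test asserts that the callback count matches the number of submitted jobs.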

From source file:com.netflix.curator.framework.recipes.leader.TestLeaderSelector.java

@Test
public void testKillSession() throws Exception {
    final Timing timing = new Timing();

    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
            timing.connection(), new RetryOneTime(1));
    client.start();
    try {
        final Semaphore semaphore = new Semaphore(0);
        final CountDownLatch interruptedLatch = new CountDownLatch(1);
        final AtomicInteger leaderCount = new AtomicInteger(0);
        LeaderSelectorListener listener = new LeaderSelectorListener() {
            private volatile Thread ourThread;

            @Override
            public void takeLeadership(CuratorFramework client) throws Exception {
                leaderCount.incrementAndGet();
                try {
                    ourThread = Thread.currentThread();
                    semaphore.release();
                    try {
                        Thread.sleep(1000000);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        interruptedLatch.countDown();
                    }
                } finally {
                    leaderCount.decrementAndGet();
                }
            }

            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                if ((newState == ConnectionState.LOST) && (ourThread != null)) {
                    ourThread.interrupt();
                }
            }
        };
        LeaderSelector leaderSelector1 = new LeaderSelector(client, PATH_NAME, listener);
        LeaderSelector leaderSelector2 = new LeaderSelector(client, PATH_NAME, listener);

        leaderSelector1.start();
        leaderSelector2.start();

        Assert.assertTrue(timing.acquireSemaphore(semaphore, 1));

        KillSession.kill(client.getZookeeperClient().getZooKeeper(), server.getConnectString());

        Assert.assertTrue(timing.awaitLatch(interruptedLatch));
        timing.sleepABit();

        leaderSelector1.requeue();
        leaderSelector2.requeue();

        Assert.assertTrue(timing.acquireSemaphore(semaphore, 1));
        Assert.assertEquals(leaderCount.get(), 1);

        leaderSelector1.close();
        leaderSelector2.close();
    } finally {
        client.close();
    }
}
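
leaderCount is incremented when takeLeadership starts and decremented in its finally block, so the closing assertion that it equals 1 verifies that exactly one selector holds leadership after the ZooKeeper session is killed and both selectors are requeued.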

From source file:com.alibaba.rocketmq.namesrv.NamesrvStartup.java

public static NamesrvController main0(String[] args) {
    System.setProperty(RemotingCommand.RemotingVersionKey, Integer.toString(MQVersion.CurrentVersion));

    // Socket send buffer size
    if (null == System.getProperty(NettySystemConfig.SystemPropertySocketSndbufSize)) {
        NettySystemConfig.SocketSndbufSize = 2048;
    }

    // Socket receive buffer size
    if (null == System.getProperty(NettySystemConfig.SystemPropertySocketRcvbufSize)) {
        NettySystemConfig.SocketRcvbufSize = 1024;
    }

    try {
        // Parse the command line
        Options options = ServerUtil.buildCommandlineOptions(new Options());
        commandLine = ServerUtil.parseCmdLine("mqnamesrv", args, buildCommandlineOptions(options),
                new PosixParser());
        if (null == commandLine) {
            System.exit(-1);
            return null;
        }

        // Load the configuration
        final NamesrvConfig namesrvConfig = new NamesrvConfig();
        final NettyServerConfig nettyServerConfig = new NettyServerConfig();
        nettyServerConfig.setListenPort(9876);
        if (commandLine.hasOption('c')) {
            String file = commandLine.getOptionValue('c');
            if (file != null) {
                InputStream in = new BufferedInputStream(new FileInputStream(file));
                properties = new Properties();
                properties.load(in);
                MixAll.properties2Object(properties, namesrvConfig);
                MixAll.properties2Object(properties, nettyServerConfig);
                System.out.println("load config properties file OK, " + file);
                in.close();
            }
        }

        // Print the config properties and exit
        if (commandLine.hasOption('p')) {
            MixAll.printObjectProperties(null, namesrvConfig);
            MixAll.printObjectProperties(null, nettyServerConfig);
            System.exit(0);
        }

        MixAll.properties2Object(ServerUtil.commandLine2Properties(commandLine), namesrvConfig);

        if (null == namesrvConfig.getRocketmqHome()) {
            System.out.println("Please set the " + MixAll.ROCKETMQ_HOME_ENV
                    + " variable in your environment to match the location of the RocketMQ installation");
            System.exit(-2);
        }

        // Initialize Logback
        LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory();
        JoranConfigurator configurator = new JoranConfigurator();
        configurator.setContext(lc);
        lc.reset();
        configurator.doConfigure(namesrvConfig.getRocketmqHome() + "/conf/logback_namesrv.xml");
        final Logger log = LoggerFactory.getLogger(LoggerName.NamesrvLoggerName);

        // Log the server configuration
        MixAll.printObjectProperties(log, namesrvConfig);
        MixAll.printObjectProperties(log, nettyServerConfig);

        // Initialize the controller
        final NamesrvController controller = new NamesrvController(namesrvConfig, nettyServerConfig);
        boolean initResult = controller.initialize();
        if (!initResult) {
            controller.shutdown();
            System.exit(-3);
        }

        Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
            private volatile boolean hasShutdown = false;
            private AtomicInteger shutdownTimes = new AtomicInteger(0);

            @Override
            public void run() {
                synchronized (this) {
                    log.info("shutdown hook was invoked, " + this.shutdownTimes.incrementAndGet());
                    if (!this.hasShutdown) {
                        this.hasShutdown = true;
                        long beginTime = System.currentTimeMillis();
                        controller.shutdown();
                        long consumingTimeTotal = System.currentTimeMillis() - beginTime;
                        log.info("shutdown hook over, consuming time total(ms): " + consumingTimeTotal);
                    }
                }
            }
        }, "ShutdownHook"));

        // Start the server
        controller.start();

        String tip = "The Name Server boot success.";
        log.info(tip);
        System.out.println(tip);

        return controller;
    } catch (Throwable e) {
        e.printStackTrace();
        System.exit(-1);
    }

    return null;
}
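
The shutdown hook pairs a volatile hasShutdown flag with an AtomicInteger that counts (and logs) how often the hook fires, ensuring the controller is shut down at most once even if the hook is invoked repeatedly.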

From source file:org.jasig.ssp.util.importer.job.staging.SqlServerStagingTableWriter.java

@Override
public void write(final List<? extends RawItem> items) {

    NamedParameterJdbcTemplate jdbcTemplate = new NamedParameterJdbcTemplate(dataSource);
    String fileName = items.get(0).getResource().getFilename();
    final String[] tableName = fileName.split("\\.");

    Integer batchStart = (Integer) (stepExecution.getExecutionContext().get("batchStart") == null ? null
            : stepExecution.getExecutionContext().get("batchStart"));
    Integer batchStop = (Integer) (stepExecution.getExecutionContext().get("batchStop") == null ? null
            : stepExecution.getExecutionContext().get("batchStop"));
    Object currentEntity = stepExecution.getExecutionContext().get("currentEntity");

    if (currentEntity == null || !currentEntity.equals(tableName[0])) {
        batchStart = 0;
        batchStop = items.size() - 1;
        currentEntity = tableName[0];
        stepExecution.getExecutionContext().put("currentEntity", currentEntity);
        stepExecution.getExecutionContext().put("batchStart", batchStart);
        stepExecution.getExecutionContext().put("batchStop", batchStop);
    } else {
        batchStart = batchStop + 1;
        batchStop = batchStart + items.size() - 1;
        stepExecution.getExecutionContext().put("batchStart", batchStart);
        stepExecution.getExecutionContext().put("batchStop", batchStop);
    }

    RawItem firstItem = items.get(0);
    Resource firstItemResource = firstItem.getResource();

    if (currentResource == null || !(this.currentResource.equals(firstItemResource))) {
        this.orderedHeaders = writeHeader(firstItem);
        this.currentResource = firstItemResource;
    }

    StringBuilder insertSql = new StringBuilder();
    insertSql.append("INSERT INTO stg_" + tableName[0] + " (batch_id,");
    StringBuilder valuesSqlBuilder = new StringBuilder();
    valuesSqlBuilder.append(" VALUES (?,");
    for (String header : this.orderedHeaders) {
        insertSql.append(header).append(",");
        valuesSqlBuilder.append("?").append(",");
    }
    insertSql.setLength(insertSql.length() - 1); // trim comma
    valuesSqlBuilder.setLength(valuesSqlBuilder.length() - 1); // trim comma
    insertSql.append(")");
    valuesSqlBuilder.append(");");
    insertSql.append(valuesSqlBuilder);

    final AtomicInteger batchStartRef = new AtomicInteger(batchStart);
    final String sql = insertSql.toString();
    jdbcTemplate.getJdbcOperations().execute(sql, new PreparedStatementCallback() {
        @Override
        public Object doInPreparedStatement(PreparedStatement ps) throws SQLException, DataAccessException {
            for (RawItem item : items) {
                final List<Object> paramsForLog = new ArrayList<Object>(orderedHeaders.length);
                int counter = 1;
                paramsForLog.add(batchStartRef.get());
                StatementCreatorUtils.setParameterValue(ps, counter, SqlTypeValue.TYPE_UNKNOWN,
                        batchStartRef.getAndIncrement());
                counter++;
                for (String header : orderedHeaders) {
                    final Map<String, String> record = item.getRecord();
                    String value = record.get(header);
                    final Integer sqlType = metadataRepository.getRepository().getColumnMetadataRepository()
                            .getColumnMetadata(new ColumnReference(tableName[0], header)).getJavaSqlType();
                    paramsForLog.add(value);
                    StatementCreatorUtils.setParameterValue(ps, counter, sqlType, value);
                    counter++;
                }
                sayQuery(sql, paramsForLog);
                ps.addBatch();
            }
            return ps.executeBatch();
        }
    });
    batchStart = batchStartRef.get();
    say("******CHUNK SQLSERVER******");
}
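
batchStartRef exists because the anonymous PreparedStatementCallback can only capture final locals: wrapping the mutable batch id in a final AtomicInteger lets the callback read and increment it per row, and the final value is copied back into batchStart afterwards.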

From source file:com.azaptree.services.executor.ThreadPoolConfig.java

@NotNull
public ThreadFactory getThreadFactory() {
    if (StringUtils.isBlank(name) && !daemon) {
        return Executors.defaultThreadFactory();
    }

    return new ThreadFactory() {
        private final AtomicInteger threadCounter = new AtomicInteger(0);

        @Override
        public Thread newThread(final Runnable r) {
            final Thread t = new Thread(r, String.format("%s-%d", name, threadCounter.incrementAndGet()));
            t.setDaemon(daemon);
            return t;
        }
    };
}
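
The per-factory AtomicInteger hands each new thread a unique, increasing name suffix, which stays correct even when several threads are created concurrently.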

From source file:org.loklak.api.server.SearchServlet.java

@Override
protected void doGet(final HttpServletRequest request, final HttpServletResponse response)
        throws ServletException, IOException {
    final RemoteAccess.Post post = RemoteAccess.evaluate(request);
    try {

        // manage DoS
        if (post.isDoS_blackout()) {
            response.sendError(503, "your (" + post.getClientHost() + ") request frequency is too high");
            return;
        }

        // check call type
        boolean jsonExt = request.getServletPath().endsWith(".json");
        boolean rssExt = request.getServletPath().endsWith(".rss");
        boolean txtExt = request.getServletPath().endsWith(".txt");

        // evaluate get parameter
        String callback = post.get("callback", "");
        boolean jsonp = callback != null && callback.length() > 0;
        boolean minified = post.get("minified", false);
        String query = post.get("q", "");
        if (query == null || query.length() == 0)
            query = post.get("query", "");
        query = CharacterCoding.html2unicode(query).replaceAll("\\+", " ");
        final long timeout = (long) post.get("timeout", DAO.getConfig("search.timeout", 2000));
        final int count = post.isDoS_servicereduction() ? 10
                : Math.min(post.get("count", post.get("maximumRecords", 100)),
                        post.isLocalhostAccess() ? 10000 : 1000);
        String source = post.isDoS_servicereduction() ? "cache" : post.get("source", "all"); // possible values: cache, backend, twitter, all
        int limit = post.get("limit", 100);
        String[] fields = post.get("fields", new String[0], ",");
        int timezoneOffset = post.get("timezoneOffset", 0);
        if (query.indexOf("id:") >= 0 && ("all".equals(source) || "twitter".equals(source)))
            source = "cache"; // id's cannot be retrieved from twitter with the scrape-api (yet), only from the cache
        final String ordername = post.get("order", Timeline.Order.CREATED_AT.getMessageFieldName());
        final Timeline.Order order = Timeline.parseOrder(ordername);

        // create tweet timeline
        final Timeline tl = new Timeline(order);
        Map<String, List<Map.Entry<String, Long>>> aggregations = null;
        final QueryEntry.Tokens tokens = new QueryEntry.Tokens(query);

        final AtomicInteger cache_hits = new AtomicInteger(0), count_backend = new AtomicInteger(0),
                count_twitter_all = new AtomicInteger(0), count_twitter_new = new AtomicInteger(0);
        final boolean backend_push = DAO.getConfig("backend.push.enabled", false);

        if ("all".equals(source)) {
            // start all targets for search concurrently
            final int timezoneOffsetf = timezoneOffset;
            final String queryf = query;
            final long start = System.currentTimeMillis();

            // start a scraper
            Thread scraperThread = tokens.raw.length() == 0 ? null : new Thread() {
                public void run() {
                    final String scraper_query = tokens.translate4scraper();
                    DAO.log(request.getServletPath() + " scraping with query: " + scraper_query);
                    Timeline twitterTl = DAO.scrapeTwitter(post, scraper_query, order, timezoneOffsetf, true,
                            timeout, true);
                    count_twitter_new.set(twitterTl.size());
                    tl.putAll(QueryEntry.applyConstraint(twitterTl, tokens, false)); // pre-localized results are not filtered with location constraint any more 
                    tl.setScraperInfo(twitterTl.getScraperInfo());
                    post.recordEvent("twitterscraper_time", System.currentTimeMillis() - start);
                }
            };
            if (scraperThread != null)
                scraperThread.start();

            // start a local search
            Thread localThread = queryf == null || queryf.length() == 0 ? null : new Thread() {
                public void run() {
                    DAO.SearchLocalMessages localSearchResult = new DAO.SearchLocalMessages(queryf, order,
                            timezoneOffsetf, count, 0);
                    post.recordEvent("cache_time", System.currentTimeMillis() - start);
                    cache_hits.set(localSearchResult.timeline.getHits());
                    tl.putAll(localSearchResult.timeline);
                }
            };
            if (localThread != null)
                localThread.start();

            // start a backend search, but only if backend_push == true or result from scraper is too bad
            boolean start_backend_thread = false;
            if (backend_push)
                start_backend_thread = true;
            else {
                // wait now for termination of scraper thread and local search
                // to evaluate how many results are available
                if (scraperThread != null)
                    try {
                        scraperThread.join(Math.max(10000, timeout - System.currentTimeMillis() + start));
                    } catch (InterruptedException e) {
                    }
                if (localThread != null)
                    try {
                        localThread.join(Math.max(100, timeout - System.currentTimeMillis() + start));
                    } catch (InterruptedException e) {
                    }
                localThread = null;
                scraperThread = null;
                if (tl.size() < count)
                    start_backend_thread = true;
            }
            Thread backendThread = tokens.original.length() == 0 || !start_backend_thread ? null
                    : new Thread() {
                        public void run() {
                            Timeline backendTl = DAO.searchBackend(tokens.original, order, count,
                                    timezoneOffsetf, "cache", timeout);
                            if (backendTl != null) {
                                tl.putAll(QueryEntry.applyConstraint(backendTl, tokens, true));
                                count_backend.set(tl.size());
                                // TODO: read and aggregate aggregations from backend as well
                            }
                            post.recordEvent("backend_time", System.currentTimeMillis() - start);
                        }
                    };
            if (backendThread != null)
                backendThread.start();

            // wait for termination of all threads
            if (scraperThread != null)
                try {
                    scraperThread.join(Math.max(10000, timeout - System.currentTimeMillis() + start));
                } catch (InterruptedException e) {
                }
            if (localThread != null)
                try {
                    localThread.join(Math.max(100, timeout - System.currentTimeMillis() + start));
                } catch (InterruptedException e) {
                }
            if (backendThread != null)
                try {
                    backendThread.join(Math.max(100, timeout - System.currentTimeMillis() + start));
                } catch (InterruptedException e) {
                }

        } else if ("twitter".equals(source) && tokens.raw.length() > 0) {
            final long start = System.currentTimeMillis();
            final String scraper_query = tokens.translate4scraper();
            DAO.log(request.getServletPath() + " scraping with query: " + scraper_query);
            Timeline twitterTl = DAO.scrapeTwitter(post, scraper_query, order, timezoneOffset, true, timeout,
                    true);
            count_twitter_new.set(twitterTl.size());
            tl.putAll(QueryEntry.applyConstraint(twitterTl, tokens, false)); // pre-localized results are not filtered with location constraint any more 
            tl.setScraperInfo(twitterTl.getScraperInfo());
            post.recordEvent("twitterscraper_time", System.currentTimeMillis() - start);
            // in this case we use all tweets, not only the latest one because it may happen that there are no new and that is not what the user expects

        } else if ("cache".equals(source)) {
            final long start = System.currentTimeMillis();
            DAO.SearchLocalMessages localSearchResult = new DAO.SearchLocalMessages(query, order,
                    timezoneOffset, count, limit, fields);
            cache_hits.set(localSearchResult.timeline.getHits());
            tl.putAll(localSearchResult.timeline);
            aggregations = localSearchResult.aggregations;
            post.recordEvent("cache_time", System.currentTimeMillis() - start);

        } else if ("backend".equals(source) && query.length() > 0) {
            final long start = System.currentTimeMillis();
            Timeline backendTl = DAO.searchBackend(query, order, count, timezoneOffset, "cache", timeout);
            if (backendTl != null) {
                tl.putAll(QueryEntry.applyConstraint(backendTl, tokens, true));
                tl.setScraperInfo(backendTl.getScraperInfo());
                // TODO: read and aggregate aggregations from backend as well
                count_backend.set(tl.size());
            }
            post.recordEvent("backend_time", System.currentTimeMillis() - start);

        }

        final long start = System.currentTimeMillis();
        // check the latest user_ids
        DAO.announceNewUserId(tl);

        // reduce the list to the wanted number of results if we have more
        tl.reduceToMaxsize(count);

        if (post.isDoS_servicereduction() && !RemoteAccess.isSleepingForClient(post.getClientHost())) {
            RemoteAccess.sleep(post.getClientHost(), 2000);
        }

        // create json or xml according to path extension
        int shortlink_iflinkexceedslength = (int) DAO.getConfig("shortlink.iflinkexceedslength", 500L);
        String shortlink_urlstub = DAO.getConfig("shortlink.urlstub", "http://localhost:9000");
        if (jsonExt) {
            post.setResponse(response, jsonp ? "application/javascript" : "application/json");
            // generate json
            Map<String, Object> m = new LinkedHashMap<String, Object>();
            Map<String, Object> metadata = new LinkedHashMap<String, Object>();
            if (!minified) {
                m.put("readme_0",
                        "THIS JSON IS THE RESULT OF YOUR SEARCH QUERY - THERE IS NO WEB PAGE WHICH SHOWS THE RESULT!");
                m.put("readme_1",
                        "loklak.org is the framework for a message search system, not the portal, read: http://loklak.org/about.html#notasearchportal");
                m.put("readme_2",
                        "This is supposed to be the back-end of a search portal. For the api, see http://loklak.org/api.html");
                m.put("readme_3",
                        "Parameters q=(query), source=(cache|backend|twitter|all), callback=p for jsonp, maximumRecords=(message count), minified=(true|false)");
            }
            metadata.put("itemsPerPage", Integer.toString(count));
            metadata.put("count", Integer.toString(tl.size()));
            metadata.put("count_twitter_all", count_twitter_all.get());
            metadata.put("count_twitter_new", count_twitter_new.get());
            metadata.put("count_backend", count_backend.get());
            metadata.put("count_cache", cache_hits.get());
            metadata.put("hits", Math.max(cache_hits.get(), tl.size()));
            if (order == Timeline.Order.CREATED_AT)
                metadata.put("period", tl.period());
            metadata.put("query", query);
            metadata.put("client", post.getClientHost());
            metadata.put("time", System.currentTimeMillis() - post.getAccessTime());
            metadata.put("servicereduction", post.isDoS_servicereduction() ? "true" : "false");
            if (tl.getScraperInfo().length() > 0)
                metadata.put("scraperInfo", tl.getScraperInfo());
            m.put("search_metadata", metadata);
            List<Object> statuses = new ArrayList<>();
            try {
                for (MessageEntry t : tl) {
                    UserEntry u = tl.getUser(t);
                    if (DAO.getConfig("flag.fixunshorten", false))
                        t.setText(TwitterScraper
                                .unshorten(t.getText(shortlink_iflinkexceedslength, shortlink_urlstub)));
                    statuses.add(t.toMap(u, true, shortlink_iflinkexceedslength, shortlink_urlstub));
                }
            } catch (ConcurrentModificationException e) {
                // late incoming messages from concurrent peer retrieval may cause this
                // we silently do nothing here and return what we listed so far
            }
            m.put("statuses", statuses);

            // aggregations
            Map<String, Object> agg = new LinkedHashMap<String, Object>();
            if (aggregations != null) {
                for (Map.Entry<String, List<Map.Entry<String, Long>>> aggregation : aggregations.entrySet()) {
                    Map<String, Object> facet = new LinkedHashMap<>();
                    for (Map.Entry<String, Long> a : aggregation.getValue()) {
                        if (a.getValue().equals(query))
                            continue; // we omit obvious terms that cannot be used for faceting, like search for "#abc" -> most hashtag is "#abc"
                        facet.put(a.getKey(), a.getValue());
                    }
                    agg.put(aggregation.getKey(), facet);
                }
            }
            m.put("aggregations", agg);

            // write json
            response.setCharacterEncoding("UTF-8");
            PrintWriter sos = response.getWriter();
            if (jsonp)
                sos.print(callback + "(");
            sos.print(minified ? new ObjectMapper().writer().writeValueAsString(m)
                    : new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(m));
            if (jsonp)
                sos.println(");");
            sos.println();
        } else if (rssExt) {
            response.setCharacterEncoding("UTF-8");
            post.setResponse(response, "application/rss+xml;charset=utf-8");
            // generate xml
            RSSMessage channel = new RSSMessage();
            channel.setPubDate(new Date());
            channel.setTitle("RSS feed for Twitter search for " + query);
            channel.setDescription("");
            channel.setLink("");
            RSSFeed feed = new RSSFeed(tl.size());
            feed.setChannel(channel);
            try {
                for (MessageEntry t : tl) {
                    UserEntry u = tl.getUser(t);
                    RSSMessage m = new RSSMessage();
                    m.setLink(t.getStatusIdUrl().toExternalForm());
                    m.setAuthor(u.getName() + " @" + u.getScreenName());
                    m.setTitle(u.getName() + " @" + u.getScreenName());
                    m.setDescription(t.getText(shortlink_iflinkexceedslength, shortlink_urlstub));
                    m.setPubDate(t.getCreatedAt());
                    m.setGuid(t.getIdStr());
                    feed.addMessage(m);
                }
            } catch (ConcurrentModificationException e) {
                // late incoming messages from concurrent peer retrieval may cause this
                // we silently do nothing here and return what we listed so far
            }
            String rss = feed.toString();
            //System.out.println("feed has " + feed.size() + " entries");

            // write xml
            response.getOutputStream().write(UTF8.getBytes(rss));
        } else if (txtExt) {
            post.setResponse(response, "text/plain");
            final StringBuilder buffer = new StringBuilder(1000);
            try {
                for (MessageEntry t : tl) {
                    UserEntry u = tl.getUser(t);
                    buffer.append(t.getCreatedAt()).append(" ").append(u.getScreenName()).append(": ")
                            .append(t.getText(shortlink_iflinkexceedslength, shortlink_urlstub)).append('\n');
                }
            } catch (ConcurrentModificationException e) {
                // late incoming messages from concurrent peer retrieval may cause this
                // we silently do nothing here and return what we listed so far
            }
            response.getOutputStream().write(UTF8.getBytes(buffer.toString()));
        }
        post.recordEvent("result_count", tl.size());
        post.recordEvent("postprocessing_time", System.currentTimeMillis() - start);
        Map<String, Object> hits = new LinkedHashMap<>();
        hits.put("count_twitter_all", count_twitter_all.get());
        hits.put("count_twitter_new", count_twitter_new.get());
        hits.put("count_backend", count_backend.get());
        hits.put("cache_hits", cache_hits.get());
        post.recordEvent("hits", hits);
        DAO.log(request.getServletPath() + "?" + request.getQueryString() + " -> " + tl.size()
                + " records returned, " + count_twitter_new.get() + " new");
        post.finalize();
    } catch (Throwable e) {
        Log.getLog().warn(e.getMessage(), e);
        //e.printStackTrace();
    }
}
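
The counters declared together near the top (cache_hits, count_backend, count_twitter_all, count_twitter_new) are set from the scraper, local-search, and backend worker threads and later read on the request thread while assembling the response metadata; AtomicInteger makes that cross-thread publishing safe without extra locking.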

From source file:com.indeed.lsmtree.recordlog.TestRecordLogDirectory.java

public void testRandomWithReader() throws Exception {
    final RecordLogDirectory<String> fileCache = createRecordLogDirectory();
    final AtomicInteger done = new AtomicInteger(8);
    for (int i = 0; i < 8; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    final Random r = new Random(index);
                    for (int i = 0; i < 10000; i++) {
                        int rand = r.nextInt(positions.size());
                        final RecordFile.Reader<String> reader = fileCache.reader(positions.get(rand));
                        assertTrue(reader.next());
                        assertEquals(reader.get(), strings.get(rand));
                        reader.close();
                    }
                } catch (IOException e) {
                    throw new RuntimeException(e);
                } finally {
                    done.decrementAndGet();
                }
            }
        }).start();
    }
    while (done.get() > 0) {
        Thread.yield();
    }
    fileCache.close();
}
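
done acts as a hand-rolled countdown latch: it starts at the number of reader threads (8), each thread decrements it in a finally block, and the main thread spins on Thread.yield() until it reaches zero before closing the directory.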

From source file:com.ict.dtube.filtersrv.FiltersrvStartup.java

public static FiltersrvController main0(String[] args) {
    System.setProperty(RemotingCommand.RemotingVersionKey, Integer.toString(MQVersion.CurrentVersion));

    // Socket send buffer size
    if (null == System.getProperty(NettySystemConfig.SystemPropertySocketSndbufSize)) {
        NettySystemConfig.SocketSndbufSize = 65535;
    }

    // Socket receive buffer size
    if (null == System.getProperty(NettySystemConfig.SystemPropertySocketRcvbufSize)) {
        NettySystemConfig.SocketRcvbufSize = 1024;
    }

    try {
        // Parse the command line
        Options options = ServerUtil.buildCommandlineOptions(new Options());
        final CommandLine commandLine = ServerUtil.parseCmdLine("mqfiltersrv", args,
                buildCommandlineOptions(options), new PosixParser());
        if (null == commandLine) {
            System.exit(-1);
            return null;
        }

        // Load the configuration
        final FiltersrvConfig filtersrvConfig = new FiltersrvConfig();
        final NettyServerConfig nettyServerConfig = new NettyServerConfig();

        if (commandLine.hasOption('c')) {
            String file = commandLine.getOptionValue('c');
            if (file != null) {
                InputStream in = new BufferedInputStream(new FileInputStream(file));
                Properties properties = new Properties();
                properties.load(in);
                MixAll.properties2Object(properties, filtersrvConfig);
                System.out.println("load config properties file OK, " + file);
                in.close();

                String port = properties.getProperty("listenPort");
                if (port != null) {
                    filtersrvConfig.setConnectWhichBroker(String.format("127.0.0.1:%s", port));
                }
            }
        }

        // Listen on port 0 so the OS assigns a free port
        nettyServerConfig.setListenPort(0);

        nettyServerConfig.setServerAsyncSemaphoreValue(filtersrvConfig.getFsServerAsyncSemaphoreValue());
        nettyServerConfig
                .setServerCallbackExecutorThreads(filtersrvConfig.getFsServerCallbackExecutorThreads());
        nettyServerConfig.setServerWorkerThreads(filtersrvConfig.getFsServerWorkerThreads());

        // Print the config properties and exit
        if (commandLine.hasOption('p')) {
            MixAll.printObjectProperties(null, filtersrvConfig);
            MixAll.printObjectProperties(null, nettyServerConfig);
            System.exit(0);
        }

        MixAll.properties2Object(ServerUtil.commandLine2Properties(commandLine), filtersrvConfig);

        if (null == filtersrvConfig.getDtubeHome()) {
            System.out.println("Please set the " + MixAll.DTUBE_HOME_ENV
                    + " variable in your environment to match the location of the Dtube installation");
            System.exit(-2);
        }

        // Initialize Logback
        LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory();
        JoranConfigurator configurator = new JoranConfigurator();
        configurator.setContext(lc);
        lc.reset();
        configurator.doConfigure(filtersrvConfig.getDtubeHome() + "/conf/logback_filtersrv.xml");
        final Logger log = LoggerFactory.getLogger(LoggerName.FiltersrvLoggerName);

        // Initialize the controller
        final FiltersrvController controller = new FiltersrvController(filtersrvConfig, nettyServerConfig);
        boolean initResult = controller.initialize();
        if (!initResult) {
            controller.shutdown();
            System.exit(-3);
        }

        Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
            private volatile boolean hasShutdown = false;
            private AtomicInteger shutdownTimes = new AtomicInteger(0);

            @Override
            public void run() {
                synchronized (this) {
                    log.info("shutdown hook was invoked, " + this.shutdownTimes.incrementAndGet());
                    if (!this.hasShutdown) {
                        this.hasShutdown = true;
                        long beginTime = System.currentTimeMillis();
                        controller.shutdown();
                        long consumingTimeTotal = System.currentTimeMillis() - beginTime;
                        log.info("shutdown hook over, consuming time total(ms): " + consumingTimeTotal);
                    }
                }
            }
        }, "ShutdownHook"));

        // Start the server
        controller.start();

        String tip = "The Filter Server boot success, " + controller.localAddr();
        log.info(tip);
        System.out.println(tip);

        return controller;
    } catch (Throwable e) {
        e.printStackTrace();
        System.exit(-1);
    }

    return null;
}

From source file:org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrServerTest.java

@Test
public void testConcurrentUpdate() throws Exception {
    TestServlet.clear();

    String serverUrl = jetty.getBaseUrl().toString() + "/cuss/foo";

    int cussThreadCount = 2;
    int cussQueueSize = 100;

    // for tracking callbacks from CUSS
    final AtomicInteger successCounter = new AtomicInteger(0);
    final AtomicInteger errorCounter = new AtomicInteger(0);
    final StringBuilder errors = new StringBuilder();

    @SuppressWarnings("serial")
    ConcurrentUpdateSolrServer cuss = new ConcurrentUpdateSolrServer(serverUrl, cussQueueSize,
            cussThreadCount) {
        @Override
        public void handleError(Throwable ex) {
            errorCounter.incrementAndGet();
            errors.append(" " + ex);
        }

        @Override
        public void onSuccess(HttpResponse resp) {
            successCounter.incrementAndGet();
        }
    };

    cuss.setParser(new BinaryResponseParser());
    cuss.setRequestWriter(new BinaryRequestWriter());
    cuss.setPollQueueTime(0);

    // ensure it doesn't block where there's nothing to do yet
    cuss.blockUntilFinished();

    int poolSize = 5;
    ExecutorService threadPool = Executors.newFixedThreadPool(poolSize,
            new SolrjNamedThreadFactory("testCUSS"));

    int numDocs = 100;
    int numRunnables = 5;
    for (int r = 0; r < numRunnables; r++)
        threadPool.execute(new SendDocsRunnable(String.valueOf(r), numDocs, cuss));

    // ensure all docs are sent
    threadPool.awaitTermination(5, TimeUnit.SECONDS);
    threadPool.shutdown();

    // wait until all requests are processed by CUSS 
    cuss.blockUntilFinished();
    cuss.shutdownNow();

    assertEquals("post", TestServlet.lastMethod);

    // expect all requests to be successful
    int expectedSuccesses = TestServlet.numReqsRcvd.get();
    assertTrue(expectedSuccesses > 0); // at least one request must have been sent

    assertTrue("Expected no errors but got " + errorCounter.get() + ", due to: " + errors.toString(),
            errorCounter.get() == 0);
    assertTrue("Expected " + expectedSuccesses + " successes, but got " + successCounter.get(),
            successCounter.get() == expectedSuccesses);

    int expectedDocs = numDocs * numRunnables;
    assertTrue("Expected CUSS to send " + expectedDocs + " but got " + TestServlet.numDocsRcvd.get(),
            TestServlet.numDocsRcvd.get() == expectedDocs);
}
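
successCounter and errorCounter are incremented from callback threads inside ConcurrentUpdateSolrServer, so atomic counters are needed to tally results without races; the final assertions then compare them against the servlet's own request counts.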