Example usage for java.util.concurrent.atomic AtomicInteger set

List of usage examples for java.util.concurrent.atomic AtomicInteger set

Introduction

On this page you can find example usage of java.util.concurrent.atomic AtomicInteger set.

Prototype

public final void set(int newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
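
Before the project excerpts below, here is a minimal, self-contained sketch of how set is typically used; the class name AtomicIntegerSetDemo and the values are illustrative only and are not taken from any of the quoted projects.

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerSetDemo {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger counter = new AtomicInteger(42);

        // set(int) unconditionally replaces the current value with volatile
        // memory semantics; unlike compareAndSet it ignores the previous value.
        counter.set(7);
        System.out.println(counter.get()); // prints 7

        // A common pattern in the examples below: publish a result computed on
        // another thread so that readers observe it after the worker finishes.
        AtomicInteger resultSize = new AtomicInteger(0);
        Thread worker = new Thread(() -> resultSize.set(123));
        worker.start();
        worker.join();
        System.out.println(resultSize.get()); // prints 123
    }
}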

Usage

From source file:org.loklak.api.search.SearchServlet.java

@Override
protected void doGet(final HttpServletRequest request, final HttpServletResponse response)
        throws ServletException, IOException {
    final Query post = RemoteAccess.evaluate(request);
    try {

        // manage DoS
        if (post.isDoS_blackout()) {
            response.sendError(503, "your (" + post.getClientHost() + ") request frequency is too high");
            return;
        }

        // check call type
        boolean jsonExt = request.getServletPath().endsWith(".json");
        boolean rssExt = request.getServletPath().endsWith(".rss");
        boolean txtExt = request.getServletPath().endsWith(".txt");

        // evaluate get parameter
        String callback = post.get("callback", "");
        boolean jsonp = callback != null && callback.length() > 0;
        boolean minified = post.get("minified", false);
        boolean shortlink_request = post.get("shortlink", true);
        String query = post.get("q", "");
        if (query == null || query.length() == 0)
            query = post.get("query", "");
        query = CharacterCoding.html2unicode(query).replaceAll("\\+", " ");
        final long timeout = (long) post.get("timeout", DAO.getConfig("search.timeout", 2000));
        final int count = post.isDoS_servicereduction() ? SEARCH_LOW_COUNT
                : Math.min(post.get("count", post.get("maximumRecords", SEARCH_DEFAULT_COUNT)),
                        post.isLocalhostAccess() ? SEARCH_MAX_LOCALHOST_COUNT : SEARCH_MAX_PUBLIC_COUNT);
        String source = post.isDoS_servicereduction() ? "cache" : post.get("source", "all"); // possible values: cache, backend, twitter, all
        int agregation_limit = post.get("limit", 10);
        String[] fields = post.get("fields", new String[0], ",");
        int timezoneOffset = post.get("timezoneOffset", 0);
        if (query.indexOf("id:") >= 0 && ("all".equals(source) || "twitter".equals(source)))
            source = "cache"; // ids cannot be retrieved from twitter with the scrape-api (yet), only from the cache
        final String ordername = post.get("order", Timeline.Order.CREATED_AT.getMessageFieldName());
        final Timeline.Order order = Timeline.parseOrder(ordername);

        // create tweet timeline
        final Timeline tl = new Timeline(order);
        JSONObject aggregations = null;
        final QueryEntry.Tokens tokens = new QueryEntry.Tokens(query);

        final AtomicInteger cache_hits = new AtomicInteger(0), count_backend = new AtomicInteger(0),
                count_twitter_all = new AtomicInteger(0), count_twitter_new = new AtomicInteger(0);
        final boolean backend_push = DAO.getConfig("backend.push.enabled", false);

        if ("all".equals(source)) {
            // start all targets for search concurrently
            final int timezoneOffsetf = timezoneOffset;
            final String queryf = query;
            final long start = System.currentTimeMillis();

            // start a scraper
            Thread scraperThread = tokens.raw.length() == 0 ? null : new Thread() {
                public void run() {
                    final String scraper_query = tokens.translate4scraper();
                    DAO.log(request.getServletPath() + " scraping with query: " + scraper_query);
                    Timeline twitterTl = DAO.scrapeTwitter(post, scraper_query, order, timezoneOffsetf, true,
                            timeout, true);
                    count_twitter_new.set(twitterTl.size());
                    tl.putAll(QueryEntry.applyConstraint(twitterTl, tokens, false)); // pre-localized results are not filtered with location constraint any more 
                    tl.setScraperInfo(twitterTl.getScraperInfo());
                    post.recordEvent("twitterscraper_time", System.currentTimeMillis() - start);
                }
            };
            if (scraperThread != null)
                scraperThread.start();

            // start a local search
            Thread localThread = queryf == null || queryf.length() == 0 ? null : new Thread() {
                public void run() {
                    DAO.SearchLocalMessages localSearchResult = new DAO.SearchLocalMessages(queryf, order,
                            timezoneOffsetf,
                            last_cache_search_time.get() > SEARCH_CACHE_THREASHOLD_TIME ? SEARCH_LOW_COUNT
                                    : count,
                            0);
                    long time = System.currentTimeMillis() - start;
                    last_cache_search_time.set(time);
                    post.recordEvent("cache_time", time);
                    cache_hits.set(localSearchResult.timeline.getHits());
                    tl.putAll(localSearchResult.timeline);
                    tl.setResultIndex(localSearchResult.timeline.getResultIndex());
                }
            };
            if (localThread != null)
                localThread.start();

            // start a backend search, but only if backend_push == true or result from scraper is too bad
            boolean start_backend_thread = false;
            if (backend_push)
                start_backend_thread = true;
            else {
                // wait now for termination of scraper thread and local search
                // to evaluate how many results are available
                if (scraperThread != null)
                    try {
                        scraperThread.join(Math.max(10000, timeout - System.currentTimeMillis() + start));
                    } catch (InterruptedException e) {
                    }
                if (localThread != null)
                    try {
                        localThread.join(Math.max(100, timeout - System.currentTimeMillis() + start));
                    } catch (InterruptedException e) {
                    }
                localThread = null;
                scraperThread = null;
                if (tl.size() < count)
                    start_backend_thread = true;
            }
            Thread backendThread = tokens.original.length() == 0 || !start_backend_thread ? null
                    : new Thread() {
                        public void run() {
                            Timeline backendTl = DAO.searchBackend(tokens.original, order, count,
                                    timezoneOffsetf, "cache", timeout);
                            if (backendTl != null) {
                                tl.putAll(QueryEntry.applyConstraint(backendTl, tokens, true));
                                count_backend.set(tl.size());
                                // TODO: read and aggregate aggregations from backend as well
                            }
                            post.recordEvent("backend_time", System.currentTimeMillis() - start);
                        }
                    };
            if (backendThread != null)
                backendThread.start();

            // wait for termination of all threads
            if (scraperThread != null)
                try {
                    scraperThread.join(Math.max(10000, timeout - System.currentTimeMillis() + start));
                } catch (InterruptedException e) {
                }
            if (localThread != null)
                try {
                    localThread.join(Math.max(100, timeout - System.currentTimeMillis() + start));
                } catch (InterruptedException e) {
                }
            if (backendThread != null)
                try {
                    backendThread.join(Math.max(100, timeout - System.currentTimeMillis() + start));
                } catch (InterruptedException e) {
                }

        } else if ("twitter".equals(source) && tokens.raw.length() > 0) {
            final long start = System.currentTimeMillis();
            final String scraper_query = tokens.translate4scraper();
            DAO.log(request.getServletPath() + " scraping with query: " + scraper_query);
            Timeline twitterTl = DAO.scrapeTwitter(post, scraper_query, order, timezoneOffset, true, timeout,
                    true);
            count_twitter_new.set(twitterTl.size());
            tl.putAll(QueryEntry.applyConstraint(twitterTl, tokens, false)); // pre-localized results are not filtered with location constraint any more 
            tl.setScraperInfo(twitterTl.getScraperInfo());
            post.recordEvent("twitterscraper_time", System.currentTimeMillis() - start);
            // in this case we use all tweets, not only the latest ones, because it may happen that there are no new ones, and that is not what the user expects

        } else if ("cache".equals(source)) {
            final long start = System.currentTimeMillis();
            DAO.SearchLocalMessages localSearchResult = new DAO.SearchLocalMessages(query, order,
                    timezoneOffset,
                    last_cache_search_time.get() > SEARCH_CACHE_THREASHOLD_TIME ? SEARCH_LOW_COUNT : count,
                    agregation_limit, fields);
            cache_hits.set(localSearchResult.timeline.getHits());
            tl.putAll(localSearchResult.timeline);
            tl.setResultIndex(localSearchResult.timeline.getResultIndex());
            aggregations = localSearchResult.getAggregations();
            long time = System.currentTimeMillis() - start;
            last_cache_search_time.set(time);
            post.recordEvent("cache_time", time);

        } else if ("backend".equals(source) && query.length() > 0) {
            final long start = System.currentTimeMillis();
            Timeline backendTl = DAO.searchBackend(query, order, count, timezoneOffset, "cache", timeout);
            if (backendTl != null) {
                tl.putAll(QueryEntry.applyConstraint(backendTl, tokens, true));
                tl.setScraperInfo(backendTl.getScraperInfo());
                // TODO: read and aggregate aggregations from backend as well
                count_backend.set(tl.size());
            }
            post.recordEvent("backend_time", System.currentTimeMillis() - start);

        }

        final long start = System.currentTimeMillis();
        // check the latest user_ids
        DAO.announceNewUserId(tl);

        // reduce the list to the wanted number of results if we have more
        tl.reduceToMaxsize(count);

        // create json or xml according to path extension
        int shortlink_iflinkexceedslength = shortlink_request
                ? (int) DAO.getConfig("shortlink.iflinkexceedslength", 500L)
                : Integer.MAX_VALUE;
        String shortlink_urlstub = DAO.getConfig("shortlink.urlstub", "http://127.0.0.1:9000");
        if (jsonExt) {
            post.setResponse(response, jsonp ? "application/javascript" : "application/json");
            // generate json
            JSONObject m = new JSONObject(true);
            JSONObject metadata = new JSONObject(true);
            if (!minified) {
                m.put("readme_0",
                        "THIS JSON IS THE RESULT OF YOUR SEARCH QUERY - THERE IS NO WEB PAGE WHICH SHOWS THE RESULT!");
                m.put("readme_1",
                        "loklak.org is the framework for a message search system, not the portal, read: http://loklak.org/about.html#notasearchportal");
                m.put("readme_2",
                        "This is supposed to be the back-end of a search portal. For the api, see http://loklak.org/api.html");
                m.put("readme_3",
                        "Parameters q=(query), source=(cache|backend|twitter|all), callback=p for jsonp, maximumRecords=(message count), minified=(true|false)");
            }
            metadata.put("itemsPerPage", Integer.toString(count));
            metadata.put("count", Integer.toString(tl.size()));
            metadata.put("count_twitter_all", count_twitter_all.get());
            metadata.put("count_twitter_new", count_twitter_new.get());
            metadata.put("count_backend", count_backend.get());
            metadata.put("count_cache", cache_hits.get());
            metadata.put("hits", Math.max(cache_hits.get(), tl.size()));
            if (order == Timeline.Order.CREATED_AT)
                metadata.put("period", tl.period());
            metadata.put("query", query);
            metadata.put("client", post.getClientHost());
            metadata.put("time", System.currentTimeMillis() - post.getAccessTime());
            metadata.put("servicereduction", post.isDoS_servicereduction() ? "true" : "false");
            if (tl.getScraperInfo().length() > 0)
                metadata.put("scraperInfo", tl.getScraperInfo());
            if (tl.getResultIndex() != null)
                metadata.put("index", tl.getResultIndex());
            m.put("search_metadata", metadata);
            JSONArray statuses = new JSONArray();
            try {
                for (MessageEntry t : tl) {
                    UserEntry u = tl.getUser(t);
                    if (DAO.getConfig("flag.fixunshorten", false))
                        t.setText(TwitterScraper
                                .unshorten(t.getText(shortlink_iflinkexceedslength, shortlink_urlstub)));
                    statuses.put(t.toJSON(u, true, shortlink_iflinkexceedslength, shortlink_urlstub));
                }
            } catch (ConcurrentModificationException e) {
                // late incoming messages from concurrent peer retrieval may cause this
                // we silently do nothing here and return what we listed so far
            }
            m.put("statuses", statuses);

            // aggregations
            m.put("aggregations", aggregations);

            // write json
            response.setCharacterEncoding("UTF-8");
            PrintWriter sos = response.getWriter();
            if (jsonp)
                sos.print(callback + "(");
            sos.print(m.toString(minified ? 0 : 2));
            if (jsonp)
                sos.println(");");
            sos.println();
        } else if (rssExt) {
            response.setCharacterEncoding("UTF-8");
            post.setResponse(response, "application/rss+xml;charset=utf-8");
            // generate xml
            RSSMessage channel = new RSSMessage();
            channel.setPubDate(new Date());
            channel.setTitle("RSS feed for Twitter search for " + query);
            channel.setDescription("");
            channel.setLink("");
            RSSFeed feed = new RSSFeed(tl.size());
            feed.setChannel(channel);
            try {
                for (MessageEntry t : tl) {
                    UserEntry u = tl.getUser(t);
                    RSSMessage m = new RSSMessage();
                    m.setLink(t.getStatusIdUrl().toExternalForm());
                    m.setAuthor(u.getName() + " @" + u.getScreenName());
                    m.setTitle(u.getName() + " @" + u.getScreenName());
                    m.setDescription(t.getText(shortlink_iflinkexceedslength, shortlink_urlstub));
                    m.setPubDate(t.getCreatedAt());
                    m.setGuid(t.getIdStr());
                    feed.addMessage(m);
                }
            } catch (ConcurrentModificationException e) {
                // late incoming messages from concurrent peer retrieval may cause this
                // we silently do nothing here and return what we listed so far
            }
            String rss = feed.toString();
            //System.out.println("feed has " + feed.size() + " entries");

            // write xml
            response.getOutputStream().write(UTF8.getBytes(rss));
        } else if (txtExt) {
            post.setResponse(response, "text/plain");
            final StringBuilder buffer = new StringBuilder(1000);
            try {
                for (MessageEntry t : tl) {
                    UserEntry u = tl.getUser(t);
                    buffer.append(t.getCreatedAt()).append(" ").append(u.getScreenName()).append(": ")
                            .append(t.getText(shortlink_iflinkexceedslength, shortlink_urlstub)).append('\n');
                }
            } catch (ConcurrentModificationException e) {
                // late incoming messages from concurrent peer retrieval may cause this
                // we silently do nothing here and return what we listed so far
            }
            response.getOutputStream().write(UTF8.getBytes(buffer.toString()));
        }
        post.recordEvent("result_count", tl.size());
        post.recordEvent("postprocessing_time", System.currentTimeMillis() - start);
        JSONObject hits = new JSONObject(true);
        hits.put("count_twitter_all", count_twitter_all.get());
        hits.put("count_twitter_new", count_twitter_new.get());
        hits.put("count_backend", count_backend.get());
        hits.put("cache_hits", cache_hits.get());
        post.recordEvent("hits", hits);
        DAO.log(request.getServletPath() + "?" + request.getQueryString() + " -> " + tl.size()
                + " records returned, " + count_twitter_new.get() + " new");
        post.finalize();
    } catch (Throwable e) {
        Log.getLog().warn(e.getMessage(), e);
        //Log.getLog().warn(e);
    }
}

From source file:com.microsoft.onedrive.apiexplorer.ItemFragment.java

/**
 * Creates a link on this item.
 * @param item The item to create a link for
 */
private void createLink(final Item item) {
    final CharSequence[] items = { "view", "edit" };
    final int nothingSelected = -1;
    final AtomicInteger selection = new AtomicInteger(nothingSelected);
    final AlertDialog alertDialog = new AlertDialog.Builder(getActivity()).setTitle(R.string.create_link)
            .setIcon(android.R.drawable.ic_menu_share)
            .setPositiveButton(R.string.create_link, new DialogInterface.OnClickListener() {
                @Override
                public void onClick(final DialogInterface dialog, final int which) {
                    if (selection.get() == nothingSelected) {
                        return;
                    }

                    final BaseApplication application = (BaseApplication) getActivity().getApplication();
                    application.getOneDriveClient().getDrive().getItems(item.id)
                            .getCreateLink(items[selection.get()].toString()).buildRequest()
                            .create(new DefaultCallback<Permission>(getActivity()) {
                                @Override
                                public void success(final Permission permission) {
                                    final ClipboardManager cm = (ClipboardManager) getActivity()
                                            .getSystemService(Context.CLIPBOARD_SERVICE);
                                    final ClipData data = ClipData.newPlainText("Link Url",
                                            permission.link.webUrl);
                                    cm.setPrimaryClip(data);
                                    Toast.makeText(getActivity(), application.getString(R.string.created_link),
                                            Toast.LENGTH_LONG).show();
                                    getActivity().onBackPressed();
                                }
                            });
                }
            }).setSingleChoiceItems(items, 0, new DialogInterface.OnClickListener() {
                @Override
                public void onClick(final DialogInterface dialog, final int which) {
                    selection.set(which);
                }
            }).setNegativeButton(R.string.cancel, new DialogInterface.OnClickListener() {
                @Override
                public void onClick(final DialogInterface dialog, final int which) {
                    dialog.cancel();
                }
            }).create();
    alertDialog.show();
}

From source file:xc.mst.repo.RepositoryDAO.java

public boolean checkOutsideRange(String sql, AtomicInteger tally, AtomicInteger numMatching) {
    // could LIMIT accomplish the same thing as EXPLAIN?
    // no - I don't think so, because if the answer is zero, LIMIT takes a very long time

    List<Map<String, Object>> records = this.jdbcTemplate.queryForList("explain " + sql);

    BigInteger rows2examine = (BigInteger) records.get(0).get("rows");
    LOG.debug("rows: " + rows2examine);
    if (rows2examine == null) {
        return false;
    } else if (rows2examine.intValue() > getEstimateCompleteListSizeThreshold()) {
        if (tally != null) {
            tally.addAndGet(2);
        }
        if (numMatching != null) {
            numMatching.set(rows2examine.intValue());
        }
    } else {
        int exactCount = this.jdbcTemplate.queryForInt(sql);
        if (exactCount > 0) {
            if (tally != null) {
                tally.addAndGet(1);
            }
            if (numMatching != null) {
                numMatching.set(exactCount);
            }
        }
    }

    if (tally != null)
        LOG.debug("tally: " + tally.get());
    if (numMatching != null)
        LOG.debug("numMatching: " + numMatching.get());

    return true;
}

From source file:io.uploader.drive.drive.media.MediaHttpUploader.java

/**
 * Uploads the media in a resumable manner.
 *
 * @param initiationRequestUrl
 *            The request URL where the initiation request will be sent
 * @return HTTP response
 */
// https://developers.google.com/drive/web/manage-uploads#resumable
private HttpResponse resumableUpload(GenericUrl initiationRequestUrl) throws IOException {

    // Make initial request to get the unique upload URL.
    HttpResponse initialResponse = executeUploadInitiation(initiationRequestUrl);
    if (!initialResponse.isSuccessStatusCode()) {
        // If the initiation request is not successful return it
        // immediately.
        logger.info("Unsuccessful: " + initialResponse.getStatusMessage());

        return initialResponse;
    }
    GenericUrl uploadUrl;
    try {
        uploadUrl = new GenericUrl(initialResponse.getHeaders().getLocation());
    } finally {
        initialResponse.disconnect();
    }

    // Convert media content into a byte stream to upload in chunks.
    contentInputStream = mediaContent.getInputStream();
    if (!contentInputStream.markSupported() && isMediaLengthKnown()) {
        // If we know the media content length then wrap the stream into a
        // Buffered input stream to
        // support the {@link InputStream#mark} and {@link
        // InputStream#reset} methods required for
        // handling server errors.
        contentInputStream = new BufferedInputStream(contentInputStream);
    }

    HttpResponse response = null;
    // Upload the media content in chunks.
    while (true) {
        currentRequest = requestFactory.buildPutRequest(uploadUrl, null);
        setContentAndHeadersOnCurrentRequest();
        // set mediaErrorHandler as I/O exception handler and as
        // unsuccessful response handler for
        // calling to serverErrorCallback on an I/O exception or an abnormal
        // HTTP response
        AtomicInteger httpErrorCounter = new AtomicInteger(0);
        new MediaUploadErrorHandler(this, currentRequest, httpErrorCounter);

        AtomicInteger tryCounter = new AtomicInteger(0);
        while (true) {
            try {
                if (isMediaLengthKnown()) {
                    // TODO(rmistry): Support gzipping content for the case where
                    // media content length is
                    // known
                    // (https://code.google.com/p/google-api-java-client/issues/detail?id=691).
                    response = executeCurrentRequestWithoutGZip(currentRequest);
                    break;
                } else {
                    response = executeCurrentRequest(currentRequest);
                    break;
                }
            } catch (Throwable e) {
                logger.error("Error occurred while uploading", e);
                if (tryCounter.getAndIncrement() >= 5) {
                    logger.error("Could not be recovered...");
                    throw e;
                }
                logger.error("Retry (" + tryCounter.get() + " times)", e);
            }
        }

        boolean returningResponse = false;
        try {
            if (response.isSuccessStatusCode()) {
                totalBytesServerReceived = getMediaContentLength();
                if (mediaContent.getCloseInputStream()) {
                    contentInputStream.close();
                }
                updateStateAndNotifyListener(UploadState.MEDIA_COMPLETE);
                returningResponse = true;
                return response;
            }

            int statusCode = response.getStatusCode();
            if (statusCode != 308) {
                // https://developers.google.com/drive/web/manage-uploads#resume-upload
                returningResponse = true;
                return response;
            } else {
                httpErrorCounter.set(0);
            }

            // Check to see if the upload URL has changed on the server.
            String updatedUploadUrl = response.getHeaders().getLocation();
            if (updatedUploadUrl != null) {
                uploadUrl = new GenericUrl(updatedUploadUrl);
            }

            // we check the amount of bytes the server received so far,
            // because the server may process
            // fewer bytes than the amount of bytes the client had sent
            long newBytesServerReceived = getNextByteIndex(response.getHeaders().getRange());
            // the server can receive any amount of bytes from 0 to current
            // chunk length
            long currentBytesServerReceived = newBytesServerReceived - totalBytesServerReceived;
            Preconditions.checkState(
                    currentBytesServerReceived >= 0 && currentBytesServerReceived <= currentChunkLength);
            long copyBytes = currentChunkLength - currentBytesServerReceived;
            if (isMediaLengthKnown()) {
                if (copyBytes > 0) {
                    // If the server didn't receive all the bytes the client
                    // sent the current position of
                    // the input stream is incorrect. So we should reset the
                    // stream and skip those bytes
                    // that the server had already received.
                    // Otherwise (the server got all bytes the client sent),
                    // the stream is in its right
                    // position, and we can continue from there
                    contentInputStream.reset();
                    long actualSkipValue = contentInputStream.skip(currentBytesServerReceived);
                    Preconditions.checkState(currentBytesServerReceived == actualSkipValue);
                }
            } else if (copyBytes == 0) {
                // server got all the bytes, so we don't need to use this
                // buffer. Otherwise, we have to
                // keep the buffer and copy part (or all) of its bytes to
                // the stream we are sending to the
                // server
                currentRequestContentBuffer = null;
            }
            totalBytesServerReceived = newBytesServerReceived;

            updateStateAndNotifyListener(UploadState.MEDIA_IN_PROGRESS);
        } finally {
            if (!returningResponse) {
                response.disconnect();
            }
        }
    }
}

From source file:de.bund.bfr.knime.pmm.common.math.ParameterOptimizer.java

public void optimize(AtomicInteger progress, int nParameterSpace, int nLevenberg, boolean stopWhenSuccessful) {
    List<Double> paramMin = new ArrayList<>();
    List<Integer> paramStepCount = new ArrayList<>();
    List<Double> paramStepSize = new ArrayList<>();
    int maxCounter = 1;
    int paramsWithRange = 0;
    int maxStepCount = 10;

    for (int i = 0; i < parameters.size(); i++) {
        Double min = minParameterValues.get(i);
        Double max = maxParameterValues.get(i);

        if (min != null && max != null) {
            paramsWithRange++;
        }
    }

    if (paramsWithRange != 0) {
        maxStepCount = (int) Math.pow(nParameterSpace, 1.0 / paramsWithRange);
        maxStepCount = Math.max(maxStepCount, 2);
        maxStepCount = Math.min(maxStepCount, 10);
    }

    for (int i = 0; i < parameters.size(); i++) {
        Double min = minParameterValues.get(i);
        Double max = maxParameterValues.get(i);

        if (min != null && max != null) {
            paramMin.add(min);
            paramStepCount.add(maxStepCount);
            maxCounter *= maxStepCount;

            if (max > min) {
                paramStepSize.add((max - min) / (maxStepCount - 1));
            } else {
                paramStepSize.add(1.0);
            }
        } else if (min != null) {
            if (min != 0.0) {
                paramMin.add(min);
            } else {
                paramMin.add(MathUtilities.EPSILON);
            }

            paramStepCount.add(1);
            paramStepSize.add(1.0);
        } else if (max != null) {
            if (max != 0.0) {
                paramMin.add(max);
            } else {
                paramMin.add(-MathUtilities.EPSILON);
            }

            paramStepCount.add(1);
            paramStepSize.add(1.0);
        } else {
            paramMin.add(MathUtilities.EPSILON);
            paramStepCount.add(1);
            paramStepSize.add(1.0);
        }
    }

    List<List<Double>> bestValues = new ArrayList<>();
    List<Double> bestError = new ArrayList<>();

    for (int i = 0; i < nLevenberg; i++) {
        bestValues.add(new ArrayList<>(Collections.nCopies(parameters.size(), i + 1.0)));
        bestError.add(Double.POSITIVE_INFINITY);
    }

    List<Integer> paramStepIndex = new ArrayList<>(Collections.nCopies(parameters.size(), 0));
    boolean done = false;
    int counter = 0;

    while (!done) {
        progress.set(Float.floatToIntBits((float) counter / (float) maxCounter));
        counter++;

        List<Double> values = new ArrayList<>();
        double error = 0.0;

        for (int i = 0; i < parameters.size(); i++) {
            double value = paramMin.get(i) + paramStepIndex.get(i) * paramStepSize.get(i);

            values.add(value);
            parser.setVarValue(parameters.get(i), value);
        }

        for (int i = 0; i < targetValues.size(); i++) {
            for (int j = 0; j < arguments.size(); j++) {
                parser.setVarValue(arguments.get(j), argumentValues.get(j).get(i));
            }

            try {
                double value = (Double) parser.evaluate(function);
                double diff = targetValues.get(i) - value;

                error += diff * diff;
            } catch (ParseException e) {
                e.printStackTrace();
            } catch (ClassCastException e) {
                error = Double.POSITIVE_INFINITY;
                break;
            }
        }

        for (int i = nLevenberg; i >= 0; i--) {
            if (i == 0 || !(error < bestError.get(i - 1))) {
                if (i != nLevenberg) {
                    bestError.add(i, error);
                    bestValues.add(i, values);
                    bestError.remove(nLevenberg);
                    bestValues.remove(nLevenberg);
                }

                break;
            }
        }

        for (int i = 0;; i++) {
            if (i >= parameters.size()) {
                done = true;
                break;
            }

            paramStepIndex.set(i, paramStepIndex.get(i) + 1);

            if (paramStepIndex.get(i) >= paramStepCount.get(i)) {
                paramStepIndex.set(i, 0);
            } else {
                break;
            }
        }
    }

    successful = false;

    for (List<Double> startValues : bestValues) {
        try {
            optimize(startValues);

            double cost = optimizerValues.getCost();

            if (!successful || cost * cost < sse) {
                useCurrentResults(startValues);

                if (rSquare != 0.0) {
                    successful = true;

                    if (stopWhenSuccessful) {
                        break;
                    }
                }
            }
        } catch (TooManyEvaluationsException e) {
            break;
        } catch (ConvergenceException e) {
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

From source file:org.apache.hadoop.hdfs.TestAutoEditRollWhenAvatarFailover.java

/**
 * Test if we can get block locations after killing primary avatar,
 * failing over to standby avatar (making it the new primary),
 * restarting a new standby avatar, killing the new primary avatar and
 * failing over to the restarted standby.
 *
 * Write logs for a while to make sure automatic rolling are triggered.
 */
@Test
public void testDoubleFailOverWithAutomaticRoll() throws Exception {
    setUp(false, "testDoubleFailOverWithAutomaticRoll");

    // To make sure it's never the case that both primary and standby
    // issue rolling, we use an injection handler.
    final AtomicBoolean startKeepThread = new AtomicBoolean(true);
    final AtomicInteger countAutoRolled = new AtomicInteger(0);
    final AtomicBoolean needFail = new AtomicBoolean(false);
    final AtomicLong currentThreadId = new AtomicLong(-1);
    final Object waitFor10Rolls = new Object();
    InjectionHandler.set(new InjectionHandler() {
        @Override
        protected void _processEvent(InjectionEventI event, Object... args) {
            if (event == InjectionEvent.FSEDIT_AFTER_AUTOMATIC_ROLL) {
                countAutoRolled.incrementAndGet();
                if (countAutoRolled.get() >= 10) {
                    synchronized (waitFor10Rolls) {
                        waitFor10Rolls.notifyAll();
                    }
                }

                if (!startKeepThread.get()) {
                    currentThreadId.set(-1);
                } else if (currentThreadId.get() == -1) {
                    currentThreadId.set(Thread.currentThread().getId());
                } else if (currentThreadId.get() != Thread.currentThread().getId()) {
                    LOG.warn("[Thread " + Thread.currentThread().getId() + "] expected: " + currentThreadId);
                    needFail.set(true);
                }

                LOG.info("[Thread " + Thread.currentThread().getId() + "] finish automatic log rolling, count "
                        + countAutoRolled.get());

                // Increase the rolling time a little bit once every 7 auto rolls
                if (countAutoRolled.get() % 7 == 3) {
                    DFSTestUtil.waitNMilliSecond(75);
                }
            }
        }
    });

    FileSystem fs = cluster.getFileSystem();

    // Add some transactions during a period of time before failing over.
    long startTime = System.currentTimeMillis();
    for (int i = 0; i < 100; i++) {
        fs.setTimes(new Path("/"), 0, 0);
        DFSTestUtil.waitNMilliSecond(100);
        if (i % 10 == 0) {
            LOG.info("================== executed " + i + " queries");
        }
        if (countAutoRolled.get() >= 10) {
            LOG.info("Automatic rolled 10 times.");
            long duration = System.currentTimeMillis() - startTime;
            TestCase.assertTrue("Automatic rolled 10 times in just " + duration + " msecs, which is too short",
                    duration > 4500);
            break;
        }
    }
    TestCase.assertTrue("Only " + countAutoRolled + " automatic rolls triggered, which is lower than expected.",
            countAutoRolled.get() >= 10);

    // Tune the rolling timeout temporarily to avoid race conditions
    // only triggered in tests
    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(5000);
    cluster.getStandbyAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(5000);

    LOG.info("================== killing primary 1");

    cluster.killPrimary();

    // Fail over and make sure after fail over, automatic edits roll still
    // will happen.
    countAutoRolled.set(0);
    startKeepThread.set(false);
    currentThreadId.set(-1);
    LOG.info("================== failing over 1");
    cluster.failOver();
    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(1000);
    LOG.info("================== restarting standby");
    cluster.restartStandby();
    cluster.getStandbyAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(1000);
    LOG.info("================== Finish restarting standby");

    // Wait for automatic rolling happens if there is no new transaction.
    startKeepThread.set(true);

    startTime = System.currentTimeMillis();
    long waitDeadLine = startTime + 20000;
    synchronized (waitFor10Rolls) {
        while (System.currentTimeMillis() < waitDeadLine && countAutoRolled.get() < 10) {
            waitFor10Rolls.wait(waitDeadLine - System.currentTimeMillis());
        }
    }
    TestCase.assertTrue("Only " + countAutoRolled + " automatic rolls triggered, which is lower than expected.",
            countAutoRolled.get() >= 10);
    long duration = System.currentTimeMillis() - startTime;
    TestCase.assertTrue("Automatic rolled 10 times in just " + duration + " msecs", duration > 9000);

    // failover back 
    countAutoRolled.set(0);
    startKeepThread.set(false);
    currentThreadId.set(-1);

    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(6000);
    cluster.getStandbyAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(6000);

    LOG.info("================== killing primary 2");
    cluster.killPrimary();
    LOG.info("================== failing over 2");
    cluster.failOver();

    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(1000);

    // Make sure after failover back, automatic rolling can still happen.
    startKeepThread.set(true);

    for (int i = 0; i < 100; i++) {
        fs.setTimes(new Path("/"), 0, 0);
        DFSTestUtil.waitNMilliSecond(200);
        if (i % 10 == 0) {
            LOG.info("================== executed " + i + " queries");
        }
        if (countAutoRolled.get() > 10) {
            LOG.info("Automatic rolled 10 times.");
            duration = System.currentTimeMillis() - startTime;
            TestCase.assertTrue("Automatic rolled 10 times in just " + duration + " msecs, which is too short",
                    duration > 9000);
            break;
        }
    }
    TestCase.assertTrue("Only " + countAutoRolled + " automatic rolls triggered, which is lower than expected.",
            countAutoRolled.get() >= 10);

    InjectionHandler.clear();

    if (needFail.get()) {
        TestCase.fail("Automatic rolling doesn't happen in the same thread when should.");
    }
}

From source file:org.springframework.integration.ip.udp.DatagramPacketMulticastSendingHandlerTests.java

@Test
public void verifySendMulticastWithAcks() throws Exception {

    MulticastSocket socket;
    try {
        socket = new MulticastSocket();
    } catch (Exception e) {
        return;
    }
    final int testPort = socket.getLocalPort();
    final AtomicInteger ackPort = new AtomicInteger();

    final String multicastAddress = "225.6.7.8";
    final String payload = "foobar";
    final CountDownLatch listening = new CountDownLatch(2);
    final CountDownLatch ackListening = new CountDownLatch(1);
    final CountDownLatch ackSent = new CountDownLatch(2);
    Runnable catcher = () -> {
        try {
            byte[] buffer = new byte[1000];
            DatagramPacket receivedPacket = new DatagramPacket(buffer, buffer.length);
            MulticastSocket socket1 = new MulticastSocket(testPort);
            socket1.setInterface(InetAddress.getByName(multicastRule.getNic()));
            socket1.setSoTimeout(8000);
            InetAddress group = InetAddress.getByName(multicastAddress);
            socket1.joinGroup(group);
            listening.countDown();
            assertTrue(ackListening.await(10, TimeUnit.SECONDS));
            LogFactory.getLog(getClass()).debug(Thread.currentThread().getName() + " waiting for packet");
            socket1.receive(receivedPacket);
            socket1.close();
            byte[] src = receivedPacket.getData();
            int length = receivedPacket.getLength();
            int offset = receivedPacket.getOffset();
            byte[] dest = new byte[6];
            System.arraycopy(src, offset + length - 6, dest, 0, 6);
            assertEquals(payload, new String(dest));
            LogFactory.getLog(getClass()).debug(Thread.currentThread().getName() + " received packet");
            DatagramPacketMessageMapper mapper = new DatagramPacketMessageMapper();
            mapper.setAcknowledge(true);
            mapper.setLengthCheck(true);
            Message<byte[]> message = mapper.toMessage(receivedPacket);
            Object id = message.getHeaders().get(IpHeaders.ACK_ID);
            byte[] ack = id.toString().getBytes();
            DatagramPacket ackPack = new DatagramPacket(ack, ack.length,
                    new InetSocketAddress(multicastRule.getNic(), ackPort.get()));
            DatagramSocket out = new DatagramSocket();
            out.send(ackPack);
            LogFactory.getLog(getClass())
                    .debug(Thread.currentThread().getName() + " sent ack to " + ackPack.getSocketAddress());
            out.close();
            ackSent.countDown();
            socket1.close();
        } catch (Exception e) {
            listening.countDown();
            e.printStackTrace();
        }
    };
    Executor executor = Executors.newFixedThreadPool(2);
    executor.execute(catcher);
    executor.execute(catcher);
    assertTrue(listening.await(10000, TimeUnit.MILLISECONDS));
    MulticastSendingMessageHandler handler = new MulticastSendingMessageHandler(multicastAddress, testPort,
            true, true, "localhost", 0, 10000);
    handler.setLocalAddress(this.multicastRule.getNic());
    handler.setMinAcksForSuccess(2);
    handler.setBeanFactory(mock(BeanFactory.class));
    handler.afterPropertiesSet();
    handler.start();
    waitAckListening(handler);
    ackPort.set(handler.getAckPort());
    ackListening.countDown();
    handler.handleMessage(MessageBuilder.withPayload(payload).build());
    assertTrue(ackSent.await(10000, TimeUnit.MILLISECONDS));
    handler.stop();
    socket.close();
}

From source file:org.apache.bookkeeper.replication.Auditor.java

/**
 * List all the ledgers and check them individually. This should not
 * be run very often.
 */
void checkAllLedgers()
        throws BKAuditException, BKException, IOException, InterruptedException, KeeperException {
    ZooKeeper newzk = ZooKeeperClient.newBuilder().connectString(conf.getZkServers())
            .sessionTimeoutMs(conf.getZkTimeout()).build();

    final BookKeeper client = new BookKeeper(new ClientConfiguration(conf), newzk);
    final BookKeeperAdmin admin = new BookKeeperAdmin(client, statsLogger);

    try {
        final LedgerChecker checker = new LedgerChecker(client);

        final AtomicInteger returnCode = new AtomicInteger(BKException.Code.OK);
        final CountDownLatch processDone = new CountDownLatch(1);

        Processor<Long> checkLedgersProcessor = new Processor<Long>() {
            @Override
            public void process(final Long ledgerId, final AsyncCallback.VoidCallback callback) {
                try {
                    if (!ledgerUnderreplicationManager.isLedgerReplicationEnabled()) {
                        LOG.info("Ledger rereplication has been disabled, aborting periodic check");
                        processDone.countDown();
                        return;
                    }
                } catch (ReplicationException.UnavailableException ue) {
                    LOG.error("Underreplication manager unavailable " + "running periodic check", ue);
                    processDone.countDown();
                    return;
                }

                LedgerHandle lh = null;
                try {
                    lh = admin.openLedgerNoRecovery(ledgerId);
                    checker.checkLedger(lh, new ProcessLostFragmentsCb(lh, callback));
                    // we collect the following stats to get a measure of the
                    // distribution of a single ledger within the bk cluster
                    // the higher the number of fragments/bookies, the more distributed it is
                    numFragmentsPerLedger.registerSuccessfulValue(lh.getNumFragments());
                    numBookiesPerLedger.registerSuccessfulValue(lh.getNumBookies());
                    numLedgersChecked.inc();
                } catch (BKException.BKNoSuchLedgerExistsException bknsle) {
                    LOG.debug("Ledger was deleted before we could check it", bknsle);
                    callback.processResult(BKException.Code.OK, null, null);
                    return;
                } catch (BKException bke) {
                    LOG.error("Couldn't open ledger " + ledgerId, bke);
                    callback.processResult(BKException.Code.BookieHandleNotAvailableException, null, null);
                    return;
                } catch (InterruptedException ie) {
                    LOG.error("Interrupted opening ledger", ie);
                    Thread.currentThread().interrupt();
                    callback.processResult(BKException.Code.InterruptedException, null, null);
                    return;
                } finally {
                    if (lh != null) {
                        try {
                            lh.close();
                        } catch (BKException bke) {
                            LOG.warn("Couldn't close ledger " + ledgerId, bke);
                        } catch (InterruptedException ie) {
                            LOG.warn("Interrupted closing ledger " + ledgerId, ie);
                            Thread.currentThread().interrupt();
                        }
                    }
                }
            }
        };

        ledgerManager.asyncProcessLedgers(checkLedgersProcessor, new AsyncCallback.VoidCallback() {
            @Override
            public void processResult(int rc, String s, Object obj) {
                returnCode.set(rc);
                processDone.countDown();
            }
        }, null, BKException.Code.OK, BKException.Code.ReadException);
        try {
            processDone.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new BKAuditException("Exception while checking ledgers", e);
        }
        if (returnCode.get() != BKException.Code.OK) {
            throw BKException.create(returnCode.get());
        }
    } finally {
        admin.close();
        client.close();
        newzk.close();
    }
}

From source file:org.neo4j.io.pagecache.PageCacheTest.java

@Test(timeout = SEMI_LONG_TIMEOUT_MILLIS)
public void mustSyncDeviceWhenFlushAndForcingPageCache() throws Exception {
    AtomicInteger syncDeviceCounter = new AtomicInteger();
    AtomicInteger expectedCountInForce = new AtomicInteger();
    Queue<Integer> expectedCountsInForce = queue(0, 0, // `cache.flushAndForce` forces the individual files, no `syncDevice` yet
            1, 2); // after test, files are closed+forced one by one
    PageSwapperFactory factory = factoryCountingSyncDevice(syncDeviceCounter, expectedCountsInForce);
    try (PageCache cache = createPageCache(factory, maxPages, pageCachePageSize, PageCacheTracer.NULL);
            PagedFile p1 = cache.map(existingFile("a"), filePageSize);
            PagedFile p2 = cache.map(existingFile("b"), filePageSize)) {
        try (PageCursor cursor = p1.io(0, PF_SHARED_WRITE_LOCK)) {
            assertTrue(cursor.next());
        }
        try (PageCursor cursor = p2.io(0, PF_SHARED_WRITE_LOCK)) {
            assertTrue(cursor.next());
        }

        cache.flushAndForce();
        expectedCountInForce.set(1);
        assertThat(syncDeviceCounter.get(), is(1));
    }
}

From source file:org.neo4j.io.pagecache.PageCacheTest.java

@Test(timeout = SEMI_LONG_TIMEOUT_MILLIS)
public void mustSyncDeviceWhenFlushAndForcingPagedFile() throws Exception {
    AtomicInteger syncDeviceCounter = new AtomicInteger();
    AtomicInteger expectedCountInForce = new AtomicInteger();
    Queue<Integer> expectedCountsInForce = queue(0, // at `p1.flushAndForce` no `syncDevice` has happened before the force
            1, 2); // closing+forcing the files one by one, we get 2 more `syncDevice`
    PageSwapperFactory factory = factoryCountingSyncDevice(syncDeviceCounter, expectedCountsInForce);
    try (PageCache cache = createPageCache(factory, maxPages, pageCachePageSize, PageCacheTracer.NULL);
            PagedFile p1 = cache.map(existingFile("a"), filePageSize);
            PagedFile p2 = cache.map(existingFile("b"), filePageSize)) {
        try (PageCursor cursor = p1.io(0, PF_SHARED_WRITE_LOCK)) {
            assertTrue(cursor.next());
        }
        try (PageCursor cursor = p2.io(0, PF_SHARED_WRITE_LOCK)) {
            assertTrue(cursor.next());
        }

        p1.flushAndForce();
        expectedCountInForce.set(1);
        assertThat(syncDeviceCounter.get(), is(1));
    }
}