Example usage for java.util.concurrent ConcurrentMap values

List of usage examples for java.util.concurrent ConcurrentMap values

Introduction

This page presents example usages of the java.util.concurrent ConcurrentMap values() method.

Prototype

Collection<V> values();

Document

Returns a Collection view of the values contained in this map.
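
Before the full examples below, here is a minimal, self-contained sketch of a typical values() call on a ConcurrentMap. The class name and map contents are illustrative assumptions, not taken from any of the source files listed under Usage.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ConcurrentMapValuesExample {
    public static void main(String[] args) {
        ConcurrentMap<String, Integer> hits = new ConcurrentHashMap<>();
        hits.put("home", 3);
        hits.put("search", 7);
        hits.put("cart", 1);

        // values() returns a view backed by the map; iterating it reads the current values.
        int total = 0;
        for (int count : hits.values()) {
            total += count;
        }
        System.out.println("total hits: " + total);

        // Removing through the view also removes the corresponding entries from the map.
        hits.values().removeIf(count -> count < 2);
        System.out.println("after removeIf: " + hits);
    }
}

For ConcurrentHashMap the iterator over this view is weakly consistent: it never throws ConcurrentModificationException and may or may not reflect updates made while iterating.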

Usage

From source file:com.jivesoftware.os.upena.deployable.UpenaMain.java

public void run(String[] args) throws Exception {

    HealthFactory.initialize(BindInterfaceToConfiguration::bindDefault, new HealthCheckRegistry() {

        @Override
        public void register(HealthChecker<?> healthChecker) {

        }

        @Override
        public void unregister(HealthChecker<?> healthChecker) {

        }
    });

    Properties buildProperties = new Properties();
    String upenaVersion = "";
    try {
        buildProperties.load(UpenaMain.class.getClassLoader().getResourceAsStream("build.properties"));
        upenaVersion = buildProperties.getProperty("my.version", "") + " "
                + buildProperties.getProperty("my.timestamp", "") + " sha:"
                + buildProperties.getProperty("git.commit.id", "");
    } catch (Exception x) {
        LOG.warn("Failed to locate build.properties");
    }

    String workingDir = System.getProperty("user.dir");
    long start = System.currentTimeMillis();
    Exception failed = null;
    while (start + TimeUnit.SECONDS.toMillis(10) > System.currentTimeMillis()) {
        try {
            File lockFile = new File(workingDir, "onlyLetOneRunningAtATime");
            lockFile.createNewFile();
            FileChannel.open(lockFile.toPath(), StandardOpenOption.WRITE).lock();
            failed = null;
            break;
        } catch (Exception x) {
            failed = x;
            LOG.warn("Failed to acquire lock on onlyLetOneRunningAtATime", x);
            Thread.sleep(1000);
        }
    }
    if (failed != null) {
        throw failed;
    }

    JDIAPI jvmapi = null;
    try {
        jvmapi = new JDIAPI();
    } catch (NoClassDefFoundError x) {
        LOG.warn(
                "Failed to local tools.jar. Please manually add to classpath. Breakpoint debugger will be disabled.");
    }

    String hostname = args[0];

    int loopbackPort = Integer.parseInt(System.getProperty("amza.loopback.port", "1174"));
    int port = Integer.parseInt(System.getProperty("amza.port", "1175"));
    String multicastGroup = System.getProperty("amza.discovery.group", "225.4.5.6");
    int multicastPort = Integer.parseInt(System.getProperty("amza.discovery.port", "1123"));
    String clusterDiscoveryName = (args.length > 1 ? args[1] : null);

    String datacenter = System.getProperty("host.datacenter", "unknownDatacenter");
    String rack = System.getProperty("host.rack", "unknownRack");
    String publicHost = System.getProperty("public.host.name", hostname);

    UpenaRingHost ringHost = new UpenaRingHost(hostname, port); // TODO include rackId

    // todo need a better way to create writer id.
    int writerId = new Random().nextInt(512);
    TimestampedOrderIdProvider orderIdProvider = new OrderIdProviderImpl(
            new ConstantWriterIdProvider(writerId));

    ObjectMapper mapper = new ObjectMapper();
    mapper.configure(SerializationFeature.INDENT_OUTPUT, true);
    mapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false);
    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    RowsStorageProvider rowsStorageProvider = rowsStorageProvider(orderIdProvider);

    boolean sslEnable = Boolean.parseBoolean(System.getProperty("ssl.enabled", "true"));
    String sslKeystorePassword = System.getProperty("ssl.keystore.password", "password");
    String sslKeystorePath = System.getProperty("ssl.keystore.path", "./certs/sslKeystore");
    String sslKeyStoreAlias = System.getProperty("ssl.keystore.alias", "upenanode").toLowerCase();
    boolean sslAutoGenerateSelfSignedCert = Boolean
            .parseBoolean(System.getProperty("ssl.keystore.autoGenerate", "true"));

    File sslKeystore = new File(sslKeystorePath);
    if (sslEnable) {
        SelfSigningCertGenerator selfSigningCertGenerator = new SelfSigningCertGenerator();
        if (sslKeystore.exists()) {
            if (!selfSigningCertGenerator.validate(sslKeyStoreAlias, sslKeystorePassword, sslKeystore)) {
                LOG.error("SSL keystore validation failed. keyStoreAlias:{} sslKeystore:{}", sslKeyStoreAlias,
                        sslKeystore);
                System.exit(1);
            }
        } else {
            sslKeystore.getParentFile().mkdirs();
            if (sslAutoGenerateSelfSignedCert) {
                selfSigningCertGenerator.create(sslKeyStoreAlias, sslKeystorePassword, sslKeystore);
            } else {
                LOG.error("Failed to locate mandatory sslKeystore:{}", sslKeystore);
                System.exit(1);
            }
        }
    }

    String consumerKey = System.getProperty("upena.consumerKey", clusterDiscoveryName);
    if (consumerKey == null) {
        consumerKey = "upena";
        LOG.warn("Please provide a stronger consumerKey via -Dupena.consumerKey");
    }
    String finalConsumerKey = consumerKey;

    String secret = System.getProperty("upena.secret");
    if (secret == null) {
        secret = "secret";
        LOG.warn("Please provide a stronger secret via -Dupena.secret");
    }
    String finalSecret = secret;

    OAuthSigner authSigner = (request) -> {
        CommonsHttpOAuthConsumer oAuthConsumer = new CommonsHttpOAuthConsumer(finalConsumerKey, finalSecret);
        oAuthConsumer.setMessageSigner(new HmacSha1MessageSigner());
        oAuthConsumer.setTokenWithSecret(finalConsumerKey, finalSecret);
        return oAuthConsumer.sign(request);
    };
    UpenaSSLConfig upenaSSLConfig = new UpenaSSLConfig(sslEnable, sslAutoGenerateSelfSignedCert, authSigner);

    UpenaAmzaService upenaAmzaService = null;
    if (!new File("./state").exists()) {
        UpdatesSender changeSetSender = new HttpUpdatesSender(sslEnable, sslAutoGenerateSelfSignedCert,
                authSigner);
        UpdatesTaker tableTaker = new HttpUpdatesTaker(sslEnable, sslAutoGenerateSelfSignedCert, authSigner);

        UpenaAmzaServiceConfig upenaAmzaServiceConfig = new UpenaAmzaServiceConfig();
        upenaAmzaService = new UpenaAmzaServiceInitializer().initialize(upenaAmzaServiceConfig, orderIdProvider,
                new com.jivesoftware.os.upena.amza.storage.FstMarshaller(
                        FSTConfiguration.getDefaultConfiguration()),
                rowsStorageProvider, rowsStorageProvider, rowsStorageProvider, changeSetSender, tableTaker,
                Optional.<SendFailureListener>absent(), Optional.<UpenaTakeFailureListener>absent(),
                (changes) -> {
                });

        /*upenaAmzaService.start(ringHost, upenaAmzaServiceConfig.resendReplicasIntervalInMillis,
            upenaAmzaServiceConfig.applyReplicasIntervalInMillis,
            upenaAmzaServiceConfig.takeFromNeighborsIntervalInMillis,
            upenaAmzaServiceConfig.checkIfCompactionIsNeededIntervalInMillis,
            upenaAmzaServiceConfig.compactTombstoneIfOlderThanNMillis);*/

        LOG.info("-----------------------------------------------------------------------");
        LOG.info("|      OLD Amza Service Online");
        LOG.info("-----------------------------------------------------------------------");
    } else {
        LOG.info("-----------------------------------------------------------------------");
        LOG.info("|      OLD Amza Data is decomissionable");
        LOG.info("-----------------------------------------------------------------------");
    }

    BAInterner baInterner = new BAInterner();

    AtomicReference<Callable<RingTopology>> topologyProvider = new AtomicReference<>(); // bit of a hack
    InstanceDescriptor instanceDescriptor = new InstanceDescriptor(datacenter, rack, "", "", "", "", "", "", "",
            "", 0, "", "", "", 0L, true);
    ConnectionDescriptorsProvider noAuthConnectionsProvider = (connectionDescriptorsRequest,
            expectedReleaseGroup) -> {
        try {
            RingTopology systemRing = topologyProvider.get().call();
            List<ConnectionDescriptor> descriptors = Lists.newArrayList(Iterables.transform(systemRing.entries,
                    input -> new ConnectionDescriptor(instanceDescriptor, sslEnable, false,
                            new HostPort(input.ringHost.getHost(), input.ringHost.getPort()),
                            Collections.emptyMap(), Collections.emptyMap())));
            return new ConnectionDescriptorsResponse(200, Collections.emptyList(), "", descriptors,
                    connectionDescriptorsRequest.getRequestUuid());
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    };

    TenantsServiceConnectionDescriptorProvider<String> noAuthConnectionPoolProvider = new TenantsServiceConnectionDescriptorProvider<>(
            Executors.newScheduledThreadPool(1), "", noAuthConnectionsProvider, "", "", 10_000); // TODO config
    noAuthConnectionPoolProvider.start();

    ConnectionDescriptorsProvider connectionsProvider = (connectionDescriptorsRequest,
            expectedReleaseGroup) -> {
        try {
            RingTopology systemRing = topologyProvider.get().call();
            List<ConnectionDescriptor> descriptors = Lists.newArrayList(Iterables.transform(systemRing.entries,
                    input -> new ConnectionDescriptor(instanceDescriptor, sslEnable, true,
                            new HostPort(input.ringHost.getHost(), input.ringHost.getPort()),
                            Collections.emptyMap(), Collections.emptyMap())));
            return new ConnectionDescriptorsResponse(200, Collections.emptyList(), "", descriptors,
                    connectionDescriptorsRequest.getRequestUuid());
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    };

    TenantsServiceConnectionDescriptorProvider<String> connectionPoolProvider = new TenantsServiceConnectionDescriptorProvider<>(
            Executors.newScheduledThreadPool(1), "", connectionsProvider, "", "", 10_000); // TODO config
    connectionPoolProvider.start();

    HttpDeliveryClientHealthProvider clientHealthProvider = new HttpDeliveryClientHealthProvider("", null, "",
            5000, 100);

    TenantRoutingHttpClientInitializer<String> nonSigningClientInitializer = new TenantRoutingHttpClientInitializer<>(
            null);

    TenantAwareHttpClient<String> systemTakeClient = nonSigningClientInitializer
            .builder(noAuthConnectionPoolProvider, // TODO config
                    clientHealthProvider)
            .deadAfterNErrors(10).checkDeadEveryNMillis(10_000).maxConnections(1_000)
            .socketTimeoutInMillis(60_000).build(); // TODO expose to conf
    TenantAwareHttpClient<String> stripedTakeClient = nonSigningClientInitializer
            .builder(noAuthConnectionPoolProvider, // TODO config
                    clientHealthProvider)
            .deadAfterNErrors(10).checkDeadEveryNMillis(10_000).maxConnections(1_000)
            .socketTimeoutInMillis(60_000).build(); // TODO expose to conf

    TenantRoutingHttpClientInitializer<String> tenantRoutingHttpClientInitializer = new TenantRoutingHttpClientInitializer<>(
            new OAuthSignerProvider(() -> authSigner));

    TenantAwareHttpClient<String> ringClient = tenantRoutingHttpClientInitializer
            .builder(connectionPoolProvider, // TODO config
                    clientHealthProvider)
            .deadAfterNErrors(10).checkDeadEveryNMillis(10_000).maxConnections(1_000)
            .socketTimeoutInMillis(60_000).build(); // TODO expose to conf
    AmzaStats amzaStats = new AmzaStats();

    AmzaService amzaService = startAmza(workingDir, amzaStats, baInterner, writerId,
            new RingHost(datacenter, rack, ringHost.getHost(), ringHost.getPort()),
            new RingMember(ringHost.getHost() + ":" + ringHost.getPort()), authSigner, systemTakeClient,
            stripedTakeClient, ringClient, topologyProvider, clusterDiscoveryName, multicastGroup,
            multicastPort);

    EmbeddedClientProvider embeddedClientProvider = new EmbeddedClientProvider(amzaService);

    LOG.info("-----------------------------------------------------------------------");
    LOG.info("|      Amza Service Online");
    LOG.info("-----------------------------------------------------------------------");

    ObjectMapper storeMapper = new ObjectMapper();
    storeMapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false);
    storeMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    UpenaConfigStore upenaConfigStore = new UpenaConfigStore(orderIdProvider, storeMapper, upenaAmzaService,
            amzaService, embeddedClientProvider);

    LOG.info("-----------------------------------------------------------------------");
    LOG.info("|      Upena Config Store Online");
    LOG.info("-----------------------------------------------------------------------");

    ExecutorService instanceChangedThreads = Executors.newFixedThreadPool(32);

    AtomicReference<UbaService> ubaServiceReference = new AtomicReference<>();
    UpenaStore upenaStore = new UpenaStore(storeMapper, upenaAmzaService, (instanceChanges) -> {
        instanceChangedThreads.submit(() -> {
            UbaService got = ubaServiceReference.get();
            if (got != null) {
                try {
                    got.instanceChanged(instanceChanges);
                } catch (Exception ex) {
                    ex.printStackTrace();
                }
            }
        });
    }, (changes) -> {
    }, (change) -> {
        LOG.info("TODO: tie into conductor. " + change);
    }, amzaService, embeddedClientProvider);
    //upenaStore.attachWatchers();

    ChaosService chaosService = new ChaosService(upenaStore);
    SecureRandom random = new SecureRandom();
    PasswordStore passwordStore = (key) -> {
        String password = System.getProperty("sauth.keystore.password");
        if (password == null) {
            File passwordFile = new File(workingDir, "keystore/" + key + ".key");
            if (passwordFile.exists()) {
                password = Files.toString(passwordFile, StandardCharsets.UTF_8);
            } else {
                passwordFile.getParentFile().mkdirs();
                password = new BigInteger(130, random).toString(32);
                Files.write(password, passwordFile, StandardCharsets.UTF_8);
            }
        }
        return password;
    };

    SessionStore sessionStore = new SessionStore(TimeUnit.MINUTES.toMillis(60), TimeUnit.MINUTES.toMillis(30));

    AtomicReference<UpenaHealth> upenaHealthProvider = new AtomicReference<>();
    InstanceHealthly instanceHealthly = (key, version) -> {
        UpenaHealth upenaHealth = upenaHealthProvider.get();
        if (upenaHealth == null) {
            return false;
        }
        ConcurrentMap<RingHost, NodeHealth> ringHostNodeHealth = upenaHealth.buildClusterHealth();
        for (NodeHealth nodeHealth : ringHostNodeHealth.values()) {
            for (NannyHealth nannyHealth : nodeHealth.nannyHealths) {
                if (nannyHealth.instanceDescriptor.instanceKey.equals(key.getKey())) {
                    return nannyHealth.serviceHealth.fullyOnline
                            ? nannyHealth.serviceHealth.version.equals(version)
                            : false;
                }
            }
        }
        return false;
    };
    UpenaService upenaService = new UpenaService(passwordStore, sessionStore, upenaStore, chaosService,
            instanceHealthly);

    LOG.info("-----------------------------------------------------------------------");
    LOG.info("|      Upena Service Online");
    LOG.info("-----------------------------------------------------------------------");

    File defaultPathToRepo = new File(new File(System.getProperty("user.dir"), ".m2"), "repository");
    PathToRepo localPathToRepo = new PathToRepo(
            new File(System.getProperty("pathToRepo", defaultPathToRepo.getAbsolutePath())));
    RepositoryProvider repositoryProvider = new RepositoryProvider(localPathToRepo);

    Host host = new Host(publicHost, datacenter, rack, ringHost.getHost(), ringHost.getPort(), workingDir, null,
            null);
    HostKey hostKey = new HostKeyProvider().getNodeKey(upenaStore.hosts, host);

    String hostInstanceId = System.getProperty("host.instance.id", hostKey.getKey());
    host = new Host(publicHost, datacenter, rack, ringHost.getHost(), ringHost.getPort(), workingDir,
            hostInstanceId, null);

    UbaLog ubaLog = (what, why, how) -> {
        try {
            upenaStore.record("Uba", what, System.currentTimeMillis(), why,
                    ringHost.getHost() + ":" + ringHost.getPort(), how);
        } catch (Exception x) {
            x.printStackTrace(); // Hmm lame
        }
    };

    OktaLog oktaLog = (who, what, why, how) -> {
        try {
            upenaStore.record("okta:" + who, what, System.currentTimeMillis(), why,
                    ringHost.getHost() + ":" + ringHost.getPort(), how);
        } catch (Exception x) {
            x.printStackTrace(); // Hmm lame
        }
    };

    OktaCredentialsMatcher.oktaLog = oktaLog;
    OktaRealm.oktaLog = oktaLog;

    UpenaClient upenaClient = new UpenaClient() {
        @Override
        public InstanceDescriptorsResponse instanceDescriptor(
                InstanceDescriptorsRequest instanceDescriptorsRequest) throws Exception {
            return upenaService.instanceDescriptors(instanceDescriptorsRequest);
        }

        @Override
        public void updateKeyPair(String instanceKey, String publicKey) throws Exception {
            Instance i = upenaStore.instances.get(new InstanceKey(instanceKey));
            if (i != null) {
                LOG.info("Updating publicKey for {}", instanceKey);
                upenaStore.instances.update(new InstanceKey(instanceKey),
                        new Instance(i.clusterKey, i.hostKey, i.serviceKey, i.releaseGroupKey, i.instanceId,
                                i.enabled, i.locked, publicKey, i.restartTimestampGMTMillis, i.ports));
            }
        }

    };

    final UbaService ubaService = new UbaServiceInitializer().initialize(passwordStore, upenaClient,
            repositoryProvider, hostKey.getKey(), workingDir,
            new UbaCoordinate(datacenter, rack, publicHost, host.hostName, "localhost", loopbackPort), null,
            ubaLog);

    UpenaHealth upenaHealth = new UpenaHealth(amzaService, upenaSSLConfig, upenaConfigStore, ubaService,
            new RingHost(datacenter, rack, ringHost.getHost(), ringHost.getPort()), hostKey);
    upenaHealthProvider.set(upenaHealth);

    DiscoveredRoutes discoveredRoutes = new DiscoveredRoutes();
    ShiroRequestHelper shiroRequestHelper = new ShiroRequestHelper(TimeUnit.DAYS.toMillis(1)); // TODO expose Sys prop?

    String shiroConfigLocation = System.getProperty("shiro.ini.location", "classpath:shiro.ini"); // classpath:oktashiro.ini

    UpenaJerseyEndpoints jerseyEndpoints = new UpenaJerseyEndpoints(shiroConfigLocation)
            .addInjectable(ShiroRequestHelper.class, shiroRequestHelper)
            .addEndpoint(UpenaClusterRestEndpoints.class).addEndpoint(UpenaHostRestEndpoints.class)
            .addEndpoint(UpenaServiceRestEndpoints.class).addEndpoint(UpenaReleaseRestEndpoints.class)
            .addEndpoint(UpenaInstanceRestEndpoints.class).addEndpoint(UpenaTenantRestEndpoints.class)
            .addInjectable(upenaHealth).addInjectable(upenaService).addInjectable(upenaStore)
            .addInjectable(upenaConfigStore).addInjectable(ubaService)
            //.addEndpoint(AmzaReplicationRestEndpoints.class)
            //.addInjectable(UpenaAmzaInstance.class, upenaAmzaService)
            .addEndpoint(UpenaEndpoints.class).addEndpoint(UpenaConnectivityEndpoints.class)
            .addEndpoint(UpenaManagedDeployableEndpoints.class).addEndpoint(UpenaHealthEndpoints.class)
            .addEndpoint(UpenaRepoEndpoints.class).addInjectable(DiscoveredRoutes.class, discoveredRoutes)
            .addInjectable(UpenaRingHost.class, ringHost).addInjectable(HostKey.class, hostKey)
            .addInjectable(UpenaAutoRelease.class, new UpenaAutoRelease(repositoryProvider, upenaStore))
            .addInjectable(PathToRepo.class, localPathToRepo);

    PercentileHealthCheckConfig phcc = bindDefault(PercentileHealthCheckConfig.class);
    PercentileHealthChecker authFilterHealthCheck = new PercentileHealthChecker(phcc);
    AuthValidationFilter authValidationFilter = new AuthValidationFilter(authFilterHealthCheck);
    authValidationFilter.addEvaluator(new NoAuthEvaluator(), "/", "/swagger.json", "/ui/*", // Handled by Shiro
            "/repo/*" // Cough
    );

    OAuth1Signature verifier = new OAuth1Signature(new OAuthServiceLocatorShim());
    OAuthSecretManager oAuthSecretManager = new OAuthSecretManager() {
        @Override
        public void clearCache() {
        }

        @Override
        public String getSecret(String id) throws AuthValidationException {
            return id.equals(finalConsumerKey) ? finalSecret : null;
        }

        @Override
        public void verifyLastSecretRemovalTime() throws Exception {
        }
    };
    AuthValidator<OAuth1Signature, OAuth1Request> oAuthValidator = new DefaultOAuthValidator(
            Executors.newScheduledThreadPool(1), Long.MAX_VALUE, oAuthSecretManager, 60_000, false, false);
    oAuthValidator.start();
    authValidationFilter.addEvaluator(new NoAuthEvaluator(), "/repo/*", "/amza/rows/stream/*",
            "/amza/rows/taken/*", "/amza/pong/*", "/amza/invalidate/*");
    authValidationFilter.addEvaluator(new OAuthEvaluator(oAuthValidator, verifier), "/upena/*", "/amza/*");

    // TODO something better someday
    String upenaApiUsername = System.getProperty("upena.api.username", null);
    String upenaApiPassword = System.getProperty("upena.api.password", null);

    if (upenaApiUsername != null && upenaApiPassword != null) {
        authValidationFilter.addEvaluator(containerRequestContext -> {
            String authCredentials = containerRequestContext.getHeaderString("Authorization");
            if (authCredentials == null) {
                return AuthStatus.not_handled;
            }

            final String encodedUserPassword = authCredentials.replaceFirst("Basic" + " ", "");
            String usernameAndPassword = null;
            try {
                byte[] decodedBytes = Base64.getDecoder().decode(encodedUserPassword);
                usernameAndPassword = new String(decodedBytes, "UTF-8");
            } catch (IOException e) {
                return AuthStatus.denied;
            }
            final StringTokenizer tokenizer = new StringTokenizer(usernameAndPassword, ":");
            final String username = tokenizer.nextToken();
            final String password = tokenizer.nextToken();

            boolean authenticationStatus = upenaApiUsername.equals(username)
                    && upenaApiPassword.equals(password);

            return authenticationStatus ? AuthStatus.authorized : AuthStatus.denied;
        }, "/api/*");
    }

    jerseyEndpoints.addContainerRequestFilter(authValidationFilter);

    String region = System.getProperty("aws.region", null);
    String roleArn = System.getProperty("aws.roleArn", null);

    AWSClientFactory awsClientFactory = new AWSClientFactory(region, roleArn);

    String accountName = System.getProperty("account.name",
            clusterDiscoveryName == null ? "" : clusterDiscoveryName);
    String humanReadableUpenaClusterName = datacenter + " - " + accountName;
    injectUI(upenaVersion, awsClientFactory, storeMapper, mapper, jvmapi, amzaService, localPathToRepo,
            repositoryProvider, hostKey, ringHost, upenaSSLConfig, port, sessionStore, ubaService, upenaHealth,
            upenaStore, upenaConfigStore, jerseyEndpoints, humanReadableUpenaClusterName, discoveredRoutes);

    injectAmza(baInterner, amzaStats, jerseyEndpoints, amzaService, ringClient);

    InitializeRestfulServer initializeRestfulServer = new InitializeRestfulServer(false, port, "UpenaNode",
            sslEnable, sslKeyStoreAlias, sslKeystorePassword, sslKeystorePath, 128, 10_000);

    buildSwagger();
    initializeRestfulServer.addClasspathResource("/resources");
    initializeRestfulServer.addContextHandler("/", jerseyEndpoints);

    RestfulServer restfulServer = initializeRestfulServer.build();
    restfulServer.start();

    LOG.info("-----------------------------------------------------------------------");
    LOG.info("|      Jetty Service Online");
    LOG.info("-----------------------------------------------------------------------");

    UpenaJerseyEndpoints loopbackJerseyEndpoints = new UpenaJerseyEndpoints(null)
            .addEndpoint(UpenaLoopbackEndpoints.class).addEndpoint(UpenaConfigRestEndpoints.class)
            .addInjectable(SessionStore.class, sessionStore)
            .addInjectable(DiscoveredRoutes.class, discoveredRoutes).addInjectable(upenaConfigStore)
            .addInjectable(upenaStore).addInjectable(upenaHealth)
            .addInjectable(UpenaService.class, upenaService);

    InitializeRestfulServer initializeLoopbackRestfulServer = new InitializeRestfulServer(
            Boolean.parseBoolean(System.getProperty("amza.loopback.strict", "true")), loopbackPort, "UpenaNode",
            false, sslKeyStoreAlias, sslKeystorePassword, sslKeystorePath, 128, 10_000);
    initializeLoopbackRestfulServer.addClasspathResource("/resources");
    initializeLoopbackRestfulServer.addContextHandler("/", loopbackJerseyEndpoints);

    RestfulServer loopbackRestfulServer = initializeLoopbackRestfulServer.build();
    loopbackRestfulServer.start();

    LOG.info("-----------------------------------------------------------------------");
    LOG.info("|      Jetty Service Online");
    LOG.info("-----------------------------------------------------------------------");

    if (ubaService != null) {
        Executors.newScheduledThreadPool(1).scheduleWithFixedDelay(() -> {
            try {
                ubaService.nanny();
            } catch (Exception ex) {
                LOG.error("Nanny failure", ex);
            }
        }, 15, 15, TimeUnit.SECONDS);
        LOG.info("-----------------------------------------------------------------------");
        LOG.info("|      Uba Service Online");
        LOG.info("-----------------------------------------------------------------------");
    }
    ubaServiceReference.set(ubaService);

    /*String peers = System.getProperty("manual.peers");
    if (peers != null) {
    String[] hostPortTuples = peers.split(",");
    for (String hostPortTuple : hostPortTuples) {
        String hostPort = hostPortTuple.trim();
        if (hostPort.length() > 0 && hostPort.contains(":")) {
            String[] host_port = hostPort.split(":");
            try {
                UpenaRingHost anotherRingHost = new UpenaRingHost(host_port[0].trim(), Integer.parseInt(host_port[1].trim()));
                List<UpenaRingHost> ring = upenaAmzaService.getRing("master");
                if (!ring.contains(anotherRingHost)) {
                    LOG.info("Adding host to the cluster: " + anotherRingHost);
                    upenaAmzaService.addRingHost("master", anotherRingHost);
                }
            } catch (Exception x) {
                LOG.warn("Malformed hostPortTuple {}", hostPort);
            }
        } else {
            LOG.warn("Malformed hostPortTuple {}", hostPort);
        }
    }
    }*/

    String vpc = System.getProperty("aws.vpc", null);
    UpenaAWSLoadBalancerNanny upenaAWSLoadBalancerNanny = new UpenaAWSLoadBalancerNanny(vpc, upenaStore,
            hostKey, awsClientFactory);

    Executors.newScheduledThreadPool(1).scheduleWithFixedDelay(() -> {
        try {
            upenaAWSLoadBalancerNanny.ensureSelf();
        } catch (Exception x) {
            LOG.warn("Failures while nannying load loadbalancer.", x);
        }
    }, 1, 1, TimeUnit.MINUTES); // TODO better

    LOG.info("-----------------------------------------------------------------------");
    LOG.info("|     Waiting for amza to be ready....");
    LOG.info("-----------------------------------------------------------------------");
    while (!amzaService.isReady()) {
        Thread.sleep(1000);
    }

    LOG.info("-----------------------------------------------------------------------");
    LOG.info("|     Begin Migration");
    LOG.info("-----------------------------------------------------------------------");

    upenaStore.init(orderIdProvider, Integer.parseInt(System.getProperty("min.service.port", "10000")),
            Integer.parseInt(System.getProperty("max.service.port", String.valueOf(Short.MAX_VALUE))), false);

    upenaConfigStore.init();

    LOG.info("-----------------------------------------------------------------------");
    LOG.info("|     End Migration");
    LOG.info("-----------------------------------------------------------------------");

    addManualPeers(amzaService);

    Host gotHost = upenaStore.hosts.get(hostKey);
    if (gotHost == null || !gotHost.equals(host)) {
        upenaStore.hosts.update(hostKey, host);
    }
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices.java

@GET
@Path("/appstatistics")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public ApplicationStatisticsInfo getAppStatistics(@Context HttpServletRequest hsr,
        @QueryParam("states") Set<String> stateQueries,
        @QueryParam("applicationTypes") Set<String> typeQueries) {
    init();

    // parse the params and build the scoreboard
    // converting state/type name to lowercase
    Set<String> states = parseQueries(stateQueries, true);
    Set<String> types = parseQueries(typeQueries, false);
    // if no types, counts the applications of any types
    if (types.size() == 0) {
        types.add(ANY);
    } else if (types.size() != 1) {
        throw new BadRequestException("# of applicationTypes = " + types.size()
                + ", we temporarily support at most one applicationType");
    }
    // if no states, returns the counts of all RMAppStates
    if (states.size() == 0) {
        for (YarnApplicationState state : YarnApplicationState.values()) {
            states.add(StringUtils.toLowerCase(state.toString()));
        }
    }
    // in case we extend to multiple applicationTypes in the future
    Map<YarnApplicationState, Map<String, Long>> scoreboard = buildScoreboard(states, types);

    // go through the apps in RM to count the numbers, ignoring the case of
    // the state/type name
    ConcurrentMap<ApplicationId, RMApp> apps = rm.getRMContext().getRMApps();
    for (RMApp rmapp : apps.values()) {
        YarnApplicationState state = rmapp.createApplicationState();
        String type = StringUtils.toLowerCase(rmapp.getApplicationType().trim());
        if (states.contains(StringUtils.toLowerCase(state.toString()))) {
            if (types.contains(ANY)) {
                countApp(scoreboard, state, ANY);
            } else if (types.contains(type)) {
                countApp(scoreboard, state, type);
            }
        }
    }

    // fill the response object
    ApplicationStatisticsInfo appStatInfo = new ApplicationStatisticsInfo();
    for (Map.Entry<YarnApplicationState, Map<String, Long>> partScoreboard : scoreboard.entrySet()) {
        for (Map.Entry<String, Long> statEntry : partScoreboard.getValue().entrySet()) {
            StatisticsItemInfo statItem = new StatisticsItemInfo(partScoreboard.getKey(), statEntry.getKey(),
                    statEntry.getValue());
            appStatInfo.add(statItem);
        }
    }
    return appStatInfo;
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.TestRMRestart.java

private void verifyNodesAfterDecom(MockRM rm, int numNodes, Resource expectedCapability,
        String expectedVersion) {
    ConcurrentMap<NodeId, RMNode> inactiveRMNodes = rm.getRMContext().getInactiveRMNodes();
    Assert.assertEquals(numNodes, inactiveRMNodes.size());
    for (RMNode rmNode : inactiveRMNodes.values()) {
        Assert.assertEquals(expectedCapability, rmNode.getTotalCapability());
        Assert.assertEquals(expectedVersion, rmNode.getNodeManagerVersion());
    }
}

From source file:org.apache.hadoop.yarn.util.TestFSDownload.java

@Test(timeout = 60000)
public void testDownloadPublicWithStatCache()
        throws IOException, URISyntaxException, InterruptedException, ExecutionException {
    final Configuration conf = new Configuration();
    FileContext files = FileContext.getLocalFSFileContext(conf);
    Path basedir = files.makeQualified(new Path("target", TestFSDownload.class.getSimpleName()));

    // if test directory doesn't have ancestor permission, skip this test
    FileSystem f = basedir.getFileSystem(conf);
    assumeTrue(FSDownload.ancestorsHaveExecutePermissions(f, basedir, null));

    files.mkdir(basedir, null, true);
    conf.setStrings(TestFSDownload.class.getName(), basedir.toString());

    int size = 512;

    final ConcurrentMap<Path, AtomicInteger> counts = new ConcurrentHashMap<Path, AtomicInteger>();
    final CacheLoader<Path, Future<FileStatus>> loader = FSDownload.createStatusCacheLoader(conf);
    final LoadingCache<Path, Future<FileStatus>> statCache = CacheBuilder.newBuilder()
            .build(new CacheLoader<Path, Future<FileStatus>>() {
                public Future<FileStatus> load(Path path) throws Exception {
                    // increment the count
                    AtomicInteger count = counts.get(path);
                    if (count == null) {
                        count = new AtomicInteger(0);
                        AtomicInteger existing = counts.putIfAbsent(path, count);
                        if (existing != null) {
                            count = existing;
                        }
                    }
                    count.incrementAndGet();

                    // use the default loader
                    return loader.load(path);
                }
            });

    // test FSDownload.isPublic() concurrently
    final int fileCount = 3;
    List<Callable<Boolean>> tasks = new ArrayList<Callable<Boolean>>();
    for (int i = 0; i < fileCount; i++) {
        Random rand = new Random();
        long sharedSeed = rand.nextLong();
        rand.setSeed(sharedSeed);
        System.out.println("SEED: " + sharedSeed);
        final Path path = new Path(basedir, "test-file-" + i);
        createFile(files, path, size, rand);
        final FileSystem fs = path.getFileSystem(conf);
        final FileStatus sStat = fs.getFileStatus(path);
        tasks.add(new Callable<Boolean>() {
            public Boolean call() throws IOException {
                return FSDownload.isPublic(fs, path, sStat, statCache);
            }
        });
    }

    ExecutorService exec = Executors.newFixedThreadPool(fileCount);
    try {
        List<Future<Boolean>> futures = exec.invokeAll(tasks);
        // files should be public
        for (Future<Boolean> future : futures) {
            assertTrue(future.get());
        }
        // for each path exactly one file status call should be made
        for (AtomicInteger count : counts.values()) {
            assertEquals(1, count.get());
        }
    } finally {
        exec.shutdown();
    }
}

From source file:co.paralleluniverse.galaxy.core.Cache.java

private void processLines(LinePredicate lp) {
    for (ConcurrentMap<Long, CacheLine> map : new ConcurrentMap[] { owned, shared }) {
        for (Iterator<CacheLine> it = map.values().iterator(); it.hasNext();) {
            CacheLine line = it.next();
            final boolean retain;
            synchronized (line) {
                retain = lp.processLine(line);
                if (!retain)
                    discardLine(line, false);
            }
            if (!retain)
                it.remove();
        }
    }
}

From source file:org.dkpro.lab.engine.impl.MultiThreadBatchTaskEngine.java

@Override
protected void executeConfiguration(BatchTask aConfiguration, TaskContext aContext, Map<String, Object> aConfig,
        Set<String> aExecutedSubtasks) throws ExecutionException, LifeCycleException {
    if (log.isTraceEnabled()) {
        // Show all subtasks executed so far
        for (String est : aExecutedSubtasks) {
            log.trace("-- Already executed: " + est);
        }
    }

    // Set up initial scope used by sub-batch-tasks using the inherited scope. The scope is
    // extended as the subtasks of this batch are executed with the present configuration.
    // FIXME: That means that sub-batch-tasks in two different configurations cannot see
    // each other. Is that intended? Mind that the "executedSubtasks" set is intentionally
    // maintained *across* configurations, so maybe the scope should also be maintained
    // *across* configurations? - REC 2014-06-15
    Set<String> scope = new HashSet<>();
    if (aConfiguration.getScope() != null) {
        scope.addAll(aConfiguration.getScope());
    }

    // Configure subtasks
    for (Task task : aConfiguration.getTasks()) {
        // Now the setup is complete
        aContext.getLifeCycleManager().configure(aContext, task, aConfig);
    }

    Queue<Task> queue = new LinkedList<>(aConfiguration.getTasks());
    // keeps track of the execution threads; 
    // TODO MW: do we really need this or can we work with the futures list only?
    Map<Task, ExecutionThread> threads = new HashMap<>();
    // keeps track of submitted Futures and their associated tasks
    Map<Future<?>, Task> futures = new HashMap<Future<?>, Task>();
    // will be instantiated with all exceptions from current loop
    ConcurrentMap<Task, Throwable> exceptionsFromLastLoop = null;
    ConcurrentMap<Task, Throwable> exceptionsFromCurrentLoop = new ConcurrentHashMap<>();

    int outerLoopCounter = 0;

    // main loop
    do {
        outerLoopCounter++;

        threads.clear();
        futures.clear();
        ExecutorService executor = Executors.newFixedThreadPool(maxThreads);

        // set the exceptions from the last loop
        exceptionsFromLastLoop = new ConcurrentHashMap<>(exceptionsFromCurrentLoop);

        // Fix MW: Clear exceptionsFromCurrentLoop; otherwise the loop will run at most twice.
        exceptionsFromCurrentLoop.clear();

        // process all tasks from the queue
        while (!queue.isEmpty()) {
            Task task = queue.poll();

            TaskContextMetadata execution = getExistingExecution(aConfiguration, aContext, task, aConfig,
                    aExecutedSubtasks);

            // Check if a subtask execution compatible with the present configuration
            // already exists ...
            if (execution == null) {
                // ... otherwise execute it with the present configuration
                log.info("Executing task [" + task.getType() + "]");

                // set scope here so that the inherited scopes are considered
                if (task instanceof BatchTask) {
                    ((BatchTask) task).setScope(scope);
                }

                ExecutionThread thread = new ExecutionThread(aContext, task, aConfig, aExecutedSubtasks);
                threads.put(task, thread);

                futures.put(executor.submit(thread), task);
            } else {
                log.debug("Using existing execution [" + execution.getId() + "]");

                // Record new/existing execution
                aExecutedSubtasks.add(execution.getId());
                scope.add(execution.getId());
            }
        }

        // try and get results from all futures to check for failed executions
        for (Map.Entry<Future<?>, Task> entry : futures.entrySet()) {
            try {
                entry.getKey().get();
            } catch (java.util.concurrent.ExecutionException ex) {
                Task task = entry.getValue();
                // TODO MW: add a retry-counter here to prevent endless loops?
                log.info("Task exec failed for [" + task.getType() + "]");
                // record the failed task, so that it can be re-added to the queue
                exceptionsFromCurrentLoop.put(task, ex);
            } catch (InterruptedException ex) {
                // thread interrupted, exit
                throw new RuntimeException(ex);
            }
        }

        log.debug("Calling shutdown");
        executor.shutdown();
        log.debug("All threads finished");

        // collect the results
        for (Map.Entry<Task, ExecutionThread> entry : threads.entrySet()) {
            Task task = entry.getKey();
            ExecutionThread thread = entry.getValue();
            TaskContextMetadata execution = thread.getTaskContextMetadata();

            // probably failed
            if (execution == null) {
                Throwable exception = exceptionsFromCurrentLoop.get(task);
                if (!(exception instanceof UnresolvedImportException)
                        && !(exception instanceof java.util.concurrent.ExecutionException)) {
                    throw new RuntimeException(exception);
                }
                exceptionsFromCurrentLoop.put(task, exception);

                // re-add to the queue
                queue.add(task);
            } else {

                // Record new/existing execution
                aExecutedSubtasks.add(execution.getId());
                scope.add(execution.getId());
            }
        }

    }
    // end of do-while: finish when the same tasks fail again
    while (!exceptionsFromCurrentLoop.keySet().equals(exceptionsFromLastLoop.keySet()));

    if (!exceptionsFromCurrentLoop.isEmpty()) {
        // collect all details
        StringBuilder details = new StringBuilder();
        for (Throwable throwable : exceptionsFromCurrentLoop.values()) {
            details.append("\n -");
            details.append(throwable.getMessage());
        }

        // we re-throw the first exception
        Throwable next = exceptionsFromCurrentLoop.values().iterator().next();
        if (next instanceof RuntimeException) {
            throw (RuntimeException) next;
        }

        // otherwise wrap it
        throw new RuntimeException(details.toString(), next);
    }
    log.info("MultiThreadBatchTask completed successfully. Total number of outer loop runs: "
            + outerLoopCounter);
}