Example usage for java.nio.file Paths get

List of usage examples for java.nio.file Paths get

Introduction

This page lists example usages of java.nio.file.Paths.get.

Prototype

public static Path get(URI uri) 

Document

Converts the given URI to a Path object.
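
Before the full programs below, here is a minimal, self-contained sketch of the call; the file:///tmp/example.txt URI and the class name PathsGetExample are only illustrative:

import java.net.URI;
import java.nio.file.Path;
import java.nio.file.Paths;

public class PathsGetExample {
    public static void main(String[] args) {
        // Paths.get(URI) converts a URI to a Path; the URI scheme must be
        // backed by an installed filesystem provider (such as "file"),
        // otherwise a FileSystemNotFoundException is thrown.
        Path fromUri = Paths.get(URI.create("file:///tmp/example.txt"));
        System.out.println(fromUri);

        // The companion overload Paths.get(String first, String... more)
        // joins one or more path strings against the default filesystem.
        Path fromStrings = Paths.get("/tmp", "example.txt");
        System.out.println(fromStrings);
    }
}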

Usage

From source file:es.upm.oeg.tools.rdfshapes.utils.CadinalityResultGenerator.java

public static void main(String[] args) throws Exception {

    String endpoint = "http://3cixty.eurecom.fr/sparql";

    List<String> classList = Files.readAllLines(Paths.get(classListPath), Charset.defaultCharset());

    String classPropertyQueryString = readFile(classPropertyQueryPath, Charset.defaultCharset());
    String propertyCardinalityQueryString = readFile(propertyCardinalityQueryPath, Charset.defaultCharset());
    String individualCountQueryString = readFile(individualCountQueryPath, Charset.defaultCharset());

    DecimalFormat df = new DecimalFormat("0.0000");

    //Create the Excel workbook and sheet
    XSSFWorkbook wb = new XSSFWorkbook();
    XSSFSheet sheet = wb.createSheet("Cardinality");

    int currentExcelRow = 0;
    int classStartRow = 0;

    for (String clazz : classList) {

        Map<String, String> litMap = new HashMap<>();
        Map<String, String> iriMap = ImmutableMap.of("class", clazz);

        String queryString = bindQueryString(individualCountQueryString,
                ImmutableMap.of(IRI_BINDINGS, iriMap, LITERAL_BINDINGS, litMap));

        int individualCount;
        List<RDFNode> c = executeQueryForList(queryString, endpoint, "c");
        if (c.size() == 1) {
            individualCount = c.get(0).asLiteral().getInt();
        } else {
            continue;
        }

        // If there are zero individuals, the class list and the endpoint probably do not match
        if (individualCount == 0) {
            throw new IllegalStateException("Check whether " + classListPath + " and " + endpoint + " match.");
        }

        //            System.out.println("***");
        //            System.out.println("### **" + clazz + "** (" + individualCount + ")");
        //            System.out.println("***");
        //            System.out.println();

        classStartRow = currentExcelRow;
        XSSFRow row = sheet.createRow(currentExcelRow);
        XSSFCell cell = row.createCell(0);
        cell.setCellValue(clazz);
        cell.getCellStyle().setAlignment(CellStyle.ALIGN_CENTER);

        queryString = bindQueryString(classPropertyQueryString,
                ImmutableMap.of(IRI_BINDINGS, iriMap, LITERAL_BINDINGS, litMap));

        List<RDFNode> nodeList = executeQueryForList(queryString, endpoint, "p");

        for (RDFNode property : nodeList) {
            if (property.isURIResource()) {

                DescriptiveStatistics stats = new DescriptiveStatistics();

                String propertyURI = property.asResource().getURI();
                //                    System.out.println("* " + propertyURI);
                //                    System.out.println();

                XSSFRow propertyRow = sheet.getRow(currentExcelRow);
                if (propertyRow == null) {
                    propertyRow = sheet.createRow(currentExcelRow);
                }
                currentExcelRow++;

                XSSFCell propertyCell = propertyRow.createCell(1);
                propertyCell.setCellValue(propertyURI);

                Map<String, String> litMap2 = new HashMap<>();
                Map<String, String> iriMap2 = ImmutableMap.of("class", clazz, "p", propertyURI);

                queryString = bindQueryString(propertyCardinalityQueryString,
                        ImmutableMap.of(IRI_BINDINGS, iriMap2, LITERAL_BINDINGS, litMap2));

                List<Map<String, RDFNode>> solnMaps = executeQueryForList(queryString, endpoint,
                        ImmutableSet.of("card", "count"));

                int sum = 0;
                List<CardinalityCount> cardinalityList = new ArrayList<>();
                if (solnMaps.size() > 0) {

                    for (Map<String, RDFNode> soln : solnMaps) {
                        int count = soln.get("count").asLiteral().getInt();
                        int card = soln.get("card").asLiteral().getInt();

                        for (int i = 0; i < count; i++) {
                            stats.addValue(card);
                        }

                        CardinalityCount cardinalityCount = new CardinalityCount(card, count,
                                (((double) count) / individualCount) * 100);
                        cardinalityList.add(cardinalityCount);
                        sum += count;
                    }

                    // Check for zero cardinality instances
                    int count = individualCount - sum;
                    if (count > 0) {
                        for (int i = 0; i < count; i++) {
                            stats.addValue(0);
                        }
                        CardinalityCount cardinalityCount = new CardinalityCount(0, count,
                                (((double) count) / individualCount) * 100);
                        cardinalityList.add(cardinalityCount);
                    }
                }

                Map<Integer, Double> cardMap = new HashMap<>();
                for (CardinalityCount count : cardinalityList) {
                    cardMap.put(count.getCardinality(), count.getPrecentage());
                }

                XSSFCell instanceCountCell = propertyRow.createCell(2);
                instanceCountCell.setCellValue(individualCount);

                XSSFCell minCell = propertyRow.createCell(3);
                minCell.setCellValue(stats.getMin());

                XSSFCell maxCell = propertyRow.createCell(4);
                maxCell.setCellValue(stats.getMax());

                XSSFCell p1 = propertyRow.createCell(5);
                p1.setCellValue(stats.getPercentile(1));

                XSSFCell p99 = propertyRow.createCell(6);
                p99.setCellValue(stats.getPercentile(99));

                XSSFCell mean = propertyRow.createCell(7);
                mean.setCellValue(df.format(stats.getMean()));

                for (int i = 0; i < 21; i++) {
                    XSSFCell dataCell = propertyRow.createCell(8 + i);
                    Double percentage = cardMap.get(i);
                    if (percentage != null) {
                        dataCell.setCellValue(df.format(percentage));
                    } else {
                        dataCell.setCellValue(0);
                    }
                }

                //                    System.out.println("| Min Card. |Max Card. |");
                //                    System.out.println("|---|---|");
                //                    System.out.println("| ? | ? |");
                //                    System.out.println();

            }
        }

        //System.out.println("class start: " + classStartRow + ", class end: " + (currentExcelRow -1));
        //We have finished writing the properties of one class; now it's time to merge the cells
        int classEndRow = currentExcelRow - 1;
        if (classStartRow < classEndRow) {
            sheet.addMergedRegion(new CellRangeAddress(classStartRow, classEndRow, 0, 0));
        }

    }

    String filename = "3cixty.xls";
    FileOutputStream fileOut = new FileOutputStream(filename);
    wb.write(fileOut);
    fileOut.close();
}

From source file:alluxio.fuse.AlluxioFuse.java

/**
 * Running this class will mount the file system according to
 * the options passed to this function {@link #parseOptions(String[])}.
 * The user-space fuse application will stay on the foreground and keep
 * the file system mounted. The user can unmount the file system by
 * gracefully killing (SIGINT) the process.
 *
 * @param args arguments to run the command line
 */
public static void main(String[] args) {
    final AlluxioFuseOptions opts = parseOptions(args);
    if (opts == null) {
        System.exit(1);
    }

    final FileSystem tfs = FileSystem.Factory.get();
    final AlluxioFuseFileSystem fs = new AlluxioFuseFileSystem(tfs, opts);
    final List<String> fuseOpts = opts.getFuseOpts();
    // Force direct_io in FUSE: writes and reads bypass the kernel page
    // cache and go directly to alluxio. This avoids extra memory copies
    // in the write path.
    fuseOpts.add("-odirect_io");

    try {
        fs.mount(Paths.get(opts.getMountPoint()), true, opts.isDebug(), fuseOpts.toArray(new String[0]));
    } finally {
        fs.umount();
    }
}

From source file:hdfs.MiniHDFS.java

public static void main(String[] args) throws Exception {
    if (args.length != 1 && args.length != 3) {
        throw new IllegalArgumentException(
                "Expected: MiniHDFS <baseDirectory> [<kerberosPrincipal> <kerberosKeytab>], " + "got: "
                        + Arrays.toString(args));
    }
    boolean secure = args.length == 3;

    // configure Paths
    Path baseDir = Paths.get(args[0]);
    // hadoop-home/, so logs will not complain
    if (System.getenv("HADOOP_HOME") == null) {
        Path hadoopHome = baseDir.resolve("hadoop-home");
        Files.createDirectories(hadoopHome);
        System.setProperty("hadoop.home.dir", hadoopHome.toAbsolutePath().toString());
    }
    // hdfs-data/, where any data is going
    Path hdfsHome = baseDir.resolve("hdfs-data");

    // configure cluster
    Configuration cfg = new Configuration();
    cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsHome.toAbsolutePath().toString());
    // lower default permission: TODO: needed?
    cfg.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "766");

    // optionally configure security
    if (secure) {
        String kerberosPrincipal = args[1];
        String keytabFile = args[2];

        cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "true");
        cfg.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
        cfg.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
        cfg.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
        cfg.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, keytabFile);
        cfg.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, keytabFile);
        cfg.set(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, "true");
        cfg.set(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, "true");
        cfg.set(DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY, "true");
    }

    UserGroupInformation.setConfiguration(cfg);

    // TODO: remove hardcoded port!
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(cfg);
    if (secure) {
        builder.nameNodePort(9998);
    } else {
        builder.nameNodePort(9999);
    }
    MiniDFSCluster dfs = builder.build();

    // Configure contents of the filesystem
    org.apache.hadoop.fs.Path esUserPath = new org.apache.hadoop.fs.Path("/user/elasticsearch");
    try (FileSystem fs = dfs.getFileSystem()) {

        // Set the elasticsearch user directory up
        fs.mkdirs(esUserPath);
        if (UserGroupInformation.isSecurityEnabled()) {
            List<AclEntry> acls = new ArrayList<>();
            acls.add(new AclEntry.Builder().setType(AclEntryType.USER).setName("elasticsearch")
                    .setPermission(FsAction.ALL).build());
            fs.modifyAclEntries(esUserPath, acls);
        }

        // Install a pre-existing repository into HDFS
        String directoryName = "readonly-repository";
        String archiveName = directoryName + ".tar.gz";
        URL readOnlyRepositoryArchiveURL = MiniHDFS.class.getClassLoader().getResource(archiveName);
        if (readOnlyRepositoryArchiveURL != null) {
            Path tempDirectory = Files.createTempDirectory(MiniHDFS.class.getName());
            File readOnlyRepositoryArchive = tempDirectory.resolve(archiveName).toFile();
            FileUtils.copyURLToFile(readOnlyRepositoryArchiveURL, readOnlyRepositoryArchive);
            FileUtil.unTar(readOnlyRepositoryArchive, tempDirectory.toFile());

            fs.copyFromLocalFile(true, true,
                    new org.apache.hadoop.fs.Path(
                            tempDirectory.resolve(directoryName).toAbsolutePath().toUri()),
                    esUserPath.suffix("/existing/" + directoryName));

            FileUtils.deleteDirectory(tempDirectory.toFile());
        }
    }

    // write our PID file
    Path tmp = Files.createTempFile(baseDir, null, null);
    String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
    Files.write(tmp, pid.getBytes(StandardCharsets.UTF_8));
    Files.move(tmp, baseDir.resolve(PID_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);

    // write our port file
    tmp = Files.createTempFile(baseDir, null, null);
    Files.write(tmp, Integer.toString(dfs.getNameNodePort()).getBytes(StandardCharsets.UTF_8));
    Files.move(tmp, baseDir.resolve(PORT_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);
}

From source file:mcnutty.music.get.MusicGet.java

public static void main(String[] args) throws Exception {

    //print out music-get
    System.out.println("                     _                      _   ");
    System.out.println(" _ __ ___  _   _ ___(_) ___       __ _  ___| |_ ");
    System.out.println("| '_ ` _ \\| | | / __| |/ __|____ / _` |/ _ \\ __|");
    System.out.println("| | | | | | |_| \\__ \\ | (_|_____| (_| |  __/ |_ ");
    System.out.println("|_| |_| |_|\\__,_|___/_|\\___|     \\__, |\\___|\\__|");
    System.out.println("                                 |___/          \n");

    //these will always be initialised later (but the compiler doesn't know that)
    String directory = "";
    Properties prop = new Properties();

    try (InputStream input = new FileInputStream("config.properties")) {
        prop.load(input);
        if (prop.getProperty("directory") != null) {
            directory = prop.getProperty("directory");
        } else {
            System.out.println(
                    "Error reading config property 'directory' - using default value of /tmp/musicserver/\n");
            directory = "/tmp/musicserver/";
        }
        if (prop.getProperty("password") == null) {
            System.out.println("Error reading config property 'password' - no default value, exiting\n");
            System.exit(1);
        }
    } catch (IOException e) {
        System.out.println("Error reading config file");
        System.exit(1);
    }

    //create a queue object
    ProcessQueue process_queue = new ProcessQueue();

    try {
        if (args.length > 0 && args[0].equals("clean")) {
            Files.delete(Paths.get("queue.json"));
        }
        //load an existing queue if possible
        String raw_queue = Files.readAllLines(Paths.get("queue.json")).toString();
        JSONArray queue_state = new JSONArray(raw_queue);
        ConcurrentLinkedQueue<QueueItem> loaded_queue = new ConcurrentLinkedQueue<>();
        JSONArray queue = queue_state.getJSONArray(0);
        for (int i = 0; i < queue.length(); i++) {
            JSONObject item = ((JSONObject) queue.get(i));
            QueueItem loaded_item = new QueueItem();
            loaded_item.ip = item.getString("ip");
            loaded_item.real_name = item.getString("name");
            loaded_item.disk_name = item.getString("guid");
            loaded_queue.add(loaded_item);
        }
        process_queue.bucket_queue = loaded_queue;
        System.out.println("Loaded queue from disk\n");
    } catch (Exception ex) {
        //otherwise clean out the music directory and start a new queue
        try {
            Files.walkFileTree(Paths.get(directory), new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    Files.delete(file);
                    return FileVisitResult.CONTINUE;
                }
            });
            Files.delete(Paths.get(directory));
        } catch (Exception e) {
            e.printStackTrace();
        }
        Files.createDirectory(Paths.get(directory));
        System.out.println("Created a new queue\n");
    }

    //start the web server
    StartServer start_server = new StartServer(process_queue, directory);
    new Thread(start_server).start();

    //wait for the web server to spool up
    Thread.sleep(1000);

    //read items from the queue and play them
    while (true) {
        QueueItem next_item = process_queue.next_item();
        if (!next_item.equals(new QueueItem())) {
            //Check the timeout
            int timeout = 547;
            try (FileInputStream input = new FileInputStream("config.properties")) {
                prop.load(input);
                timeout = Integer.parseInt(prop.getProperty("timeout", "547"));
            } catch (Exception e) {
                e.printStackTrace();
            }
            System.out.println("Playing " + next_item.real_name);
            process_queue.set_played(next_item);
            process_queue.save_queue();
            Process p = Runtime.getRuntime().exec("timeout " + timeout
                    + "s mplayer -fs -quiet -af volnorm=2:0.25 " + directory + next_item.disk_name);

            try {
                p.waitFor(timeout, TimeUnit.SECONDS);
                Files.delete(Paths.get(directory + next_item.disk_name));
            } catch (Exception e) {
                e.printStackTrace();
            }
        } else {
            process_queue.bucket_played.clear();
        }
        Thread.sleep(1000);
    }
}

From source file:net.cloudkit.enterprises.ws.SuperPassQueryTest.java

public static void main(String[] args) throws Exception {

    List<String> params = new ArrayList<>();
    // System.out.println(SuperPassQueryTest.class.getResource("/list.dat").toURI());
    Path path = Paths.get(SuperPassQueryTest.class.getResource("/list.dat").toURI());
    try (BufferedReader reader = Files.newBufferedReader(path, Charset.forName("UTF-8"))) {
        // System.out.println(reader.readLine().length());
            String line;
        while ((line = reader.readLine()) != null) {
            // System.out.println("TEXT LINE:" + line);
            params.add(line);
        }
    }

    Path succeededFile = Paths.get(SuperPassQueryTest.class.getResource("/succeeded.dat").toURI());
    BufferedWriter succeededWriter = Files.newBufferedWriter(succeededFile, StandardCharsets.UTF_8,
            StandardOpenOption.APPEND);
    Path failedFile = Paths.get(SuperPassQueryTest.class.getResource("/failed.dat").toURI());
    BufferedWriter failedWriter = Files.newBufferedWriter(failedFile, StandardCharsets.UTF_8,
            StandardOpenOption.APPEND);

    for (String param : params) {
        try {
            /*
            StringTokenizer stringTokenizer = new StringTokenizer(param, ",");
            while(stringTokenizer.hasMoreTokens()){
            System.out.println("COUNT:" + stringTokenizer.countTokens());
            System.out.println("VALUE:" + stringTokenizer.nextToken());
            System.out.println("COUNT:" + stringTokenizer.countTokens());
            }
            */

            System.out.println("QUERY PARAMS:" + param);
            String[] paramArray = param.split(",");
            // System.out.println("VALUE:" + paramArray[0]);
            // System.out.println("VALUE:" + paramArray[1]);
            // System.out.println("VALUE:" + paramArray[2]);

            String value_1 = paramArray[0];
            String value_2 = paramArray[1];
            String value_3 = paramArray[2];

            String serviceName = "eport.superpass.spdec.DecQueryListService";
            byte[] requestContext = ("<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\"?>\n"
                    + "<RequestContext>\n" + "    <Group name=\"SystemInfo\">\n"
                    + "        <Key name=\"NAME_FULL\">???</Key>\n"
                    + "        <Key name=\"ClientId\">5300001976914</Key>\n"
                    + "        <Key name=\"CertNo\">df630b</Key>\n"
                    + "        <Key name=\"SaicSysNo\">766350979</Key>\n"
                    + "        <Key name=\"DEP_IN_CODE\">5300</Key>\n"
                    + "        <Key name=\"REG_CO_CGAC\">4403180237</Key>\n"
                    + "        <Key name=\"ENT_SEQ_NO\">000000000000315537</Key>\n"
                    + "        <Key name=\"ENT_TYPE\">3</Key>\n"
                    + "        <Key name=\"IcCode\">8930000011040</Key>\n"
                    + "        <Key name=\"OperatorName\">?</Key>\n"
                    + "        <Key name=\"DEP_CODE_CHG\">5305</Key>\n"
                    + "        <Key name=\"SessionId\">AE2533938D521A9972186B07BBBEB244</Key>\n"
                    + "    </Group>\n" + "    <Group name=\"DataPresentation\">\n"
                    + "        <Key name=\"SignatureAlgorithm\"/>\n"
                    + "        <Key name=\"EncryptAlgorithm\"/>\n"
                    + "        <Key name=\"CompressAlgorithm\"/>\n" + "    </Group>\n"
                    + "    <Group name=\"Default\">\n"
                    + "        <Key name=\"clientSystemId\">0400620001</Key>\n"
                    + "        <Key name=\"needWebInvoke\">True</Key>\n" + "    </Group>\n"
                    + "</RequestContext>").getBytes();

            byte[] requestData = ("<?xml version=\"1.0\"?>\n"
                    + "<DecQueryListRequest xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\">\n"
                    + "  <OperType>0</OperType>\n" + "  <DecType>\n" + "    <TrnType>0</TrnType>\n"
                    + "    <IEFlag>" + value_3 + "</IEFlag>\n" + "    <DecSubType />\n" + "  </DecType>\n"
                    + "  <CopeCode>766350979</CopeCode>\n" + "  <AgentCode>4403180237</AgentCode>\n"
                    + "  <SeqNo>" + value_1 + "</SeqNo>\n" + "  <UserType>0</UserType>\n"
                    + "</DecQueryListRequest>").getBytes();

            Holder<byte[]> responseData = new Holder<>();

            // <?xml version="1.0" encoding="UTF-8" standalone="no"?><ResponseContext><ResponseCode>0</ResponseCode><ResponseMessage>success</ResponseMessage><ServiceResponseCode>0</ServiceResponseCode><ServiceResponseMessage>?</ServiceResponseMessage><ExceptionDetail/><Group name="DataPresentation"><Key name="CompressAlgorithm"/><Key name="SignatureAlgorithm"/><Key name="EncryptAlgorithm"/></Group></ResponseContext>
            // <?xml version="1.0" encoding="UTF-8" standalone="yes"?><DecQueryListResponse><QueryResponseData><EntryId>531820161181010544</EntryId><SeqNo>000000001139524197</SeqNo><BillNo>2016051920160523</BillNo><IEDate>20160621</IEDate><TradeMode>0615</TradeMode><ItemsNum>19</ItemsNum><TrafName></TrafName><Status>O</Status><AgentName>???</AgentName><IEFlag>I</IEFlag><CustomsCode>5318</CustomsCode><DeclTrnRel>0</DeclTrnRel><RetExplain>;?</RetExplain><NoticeDate>2016-06-29</NoticeDate><TradeName>()??</TradeName><ExtendField><DecDeclareSysType>2</DecDeclareSysType><TrnSysType>1</TrnSysType><AssureExamRet>0</AssureExamRet><RelatedDocumentType>  </RelatedDocumentType><DeclareSeqNo>                  </DeclareSeqNo><ExtendField53>P</ExtendField53><ExtendField>21                                                   P</ExtendField></ExtendField><EntryType>M</EntryType></QueryResponseData></DecQueryListResponse>
            // String responseContext = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?><ResponseContext><ResponseCode>0</ResponseCode><ResponseMessage>success</ResponseMessage><ServiceResponseCode>0</ServiceResponseCode><ServiceResponseMessage>?</ServiceResponseMessage><ExceptionDetail/><Group name=\"DataPresentation\"><Key name=\"CompressAlgorithm\"/><Key name=\"SignatureAlgorithm\"/><Key name=\"EncryptAlgorithm\"/></Group></ResponseContext>";
            // String queryListResponse = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><DecQueryListResponse><QueryResponseData><EntryId>531820161181010544</EntryId><SeqNo>000000001139524197</SeqNo><BillNo>2016051920160523</BillNo><IEDate>20160621</IEDate><TradeMode>0615</TradeMode><ItemsNum>19</ItemsNum><TrafName></TrafName><Status>O</Status><AgentName>???</AgentName><IEFlag>I</IEFlag><CustomsCode>5318</CustomsCode><DeclTrnRel>0</DeclTrnRel><RetExplain>;?</RetExplain><NoticeDate>2016-06-29</NoticeDate><TradeName>()??</TradeName><ExtendField><DecDeclareSysType>2</DecDeclareSysType><TrnSysType>1</TrnSysType><AssureExamRet>0</AssureExamRet><RelatedDocumentType>  </RelatedDocumentType><DeclareSeqNo>                  </DeclareSeqNo><ExtendField53>P</ExtendField53><ExtendField>21                                                   P</ExtendField></ExtendField><EntryType>M</EntryType></QueryResponseData></DecQueryListResponse>";

            String responseContext = new String(
                    superPass.service(serviceName, requestContext, requestData, responseData));
            String queryListResponse = new String(responseData.value);
            System.out.println("RESPONSE_CONTEXT:" + responseContext);
            System.out.println("QUERY_LIST_RESPONSE:" + queryListResponse);

            String serviceResponseCode = parsingReceiptStatus(responseContext);
            System.out.println("SERVICE_RESPONSE_CODE:" + serviceResponseCode);
            if (serviceResponseCode.equals("0")) {
                String data = parsingReceiptData(queryListResponse);
                System.out.println("DATA:" + data);
                succeededWriter.write(data);
                succeededWriter.flush();
            } else {
                failedWriter.write(param + "\n");
                failedWriter.flush();
            }
            Thread.sleep(6 * 1000);
        } catch (Exception e) {
            failedWriter.write(param + "\n");
            failedWriter.flush();
        }
    }
    succeededWriter.close();
    failedWriter.close();
}

From source file:net.cyllene.hackerrank.downloader.HackerrankDownloader.java

public static void main(String[] args) {
    // Parse arguments and set up the defaults
    DownloaderSettings.cmd = parseArguments(args);

    if (DownloaderSettings.cmd.hasOption("help")) {
        printHelp();
        System.exit(0);
    }

    if (DownloaderSettings.cmd.hasOption("verbose")) {
        DownloaderSettings.beVerbose = true;
    }

    /**
     * Output directory logic:
     * 1) if directory exists, ask for -f option to overwrite, quit with message
     * 2) if -f flag is set, check if user has access to a parent directory
     * 3) if no access, quit with error
     * 4) if everything is OK, remember the path
     */
    String sDesiredPath = DownloaderSettings.outputDir;
    if (DownloaderSettings.cmd.hasOption("directory")) {
        sDesiredPath = DownloaderSettings.cmd.getOptionValue("d", DownloaderSettings.outputDir);
    }
    if (DownloaderSettings.beVerbose) {
        System.out.println("Checking output dir: " + sDesiredPath);
    }
    Path desiredPath = Paths.get(sDesiredPath);
    if (Files.exists(desiredPath) && Files.isDirectory(desiredPath)) {
        if (!DownloaderSettings.cmd.hasOption("f")) {
            System.out.println("I wouldn't like to overwrite existing directory: " + sDesiredPath
                    + ", set the --force flag if you are sure. May lead to data loss, be careful.");
            System.exit(0);
        } else {
            System.out.println(
                    "WARNING!" + System.lineSeparator() + "--force flag is set. Overwriting directory: "
                            + sDesiredPath + System.lineSeparator() + "WARNING!");
        }
    }
    if ((Files.exists(desiredPath) && !Files.isWritable(desiredPath))
            || !Files.isWritable(desiredPath.getParent())) {
        System.err
                .println("Fatal error: " + sDesiredPath + " cannot be created or modified. Check permissions.");
        // TODO: use Exceptions instead of system.exit
        System.exit(1);
    }
    DownloaderSettings.outputDir = sDesiredPath;

    Integer limit = DownloaderSettings.ITEMS_TO_DOWNLOAD;
    if (DownloaderSettings.cmd.hasOption("limit")) {
        try {
            limit = ((Number) DownloaderSettings.cmd.getParsedOptionValue("l")).intValue();
        } catch (ParseException e) {
            System.out.println("Incorrect limit: " + e.getMessage() + System.lineSeparator()
                    + "Using default value: " + limit);
        }
    }

    Integer offset = DownloaderSettings.ITEMS_TO_SKIP;
    if (DownloaderSettings.cmd.hasOption("offset")) {
        try {
            offset = ((Number) DownloaderSettings.cmd.getParsedOptionValue("o")).intValue();
        } catch (ParseException e) {
            System.out.println("Incorrect offset: " + e.getMessage() + " Using default value: " + offset);
        }
    }

    DownloaderCore dc = DownloaderCore.INSTANCE;

    List<HRChallenge> challenges = new LinkedList<>();

    // Download everything first
    Map<String, List<Integer>> structure = null;
    try {
        structure = dc.getStructure(offset, limit);
    } catch (IOException e) {
        System.err.println("Fatal Error: could not get data structure.");
        e.printStackTrace();
        System.exit(1);
    }

    challengesLoop: for (Map.Entry<String, List<Integer>> entry : structure.entrySet()) {
        String challengeSlug = entry.getKey();
        HRChallenge currentChallenge = null;
        try {
            currentChallenge = dc.getChallengeDetails(challengeSlug);
        } catch (IOException e) {
            System.err.println("Error: could not get challenge info for: " + challengeSlug);
            if (DownloaderSettings.beVerbose) {
                e.printStackTrace();
            }
            continue challengesLoop;
        }

        submissionsLoop: for (Integer submissionId : entry.getValue()) {
            HRSubmission submission = null;
            try {
                submission = dc.getSubmissionDetails(submissionId);
            } catch (IOException e) {
                System.err.println("Error: could not get submission info for: " + submissionId);
                if (DownloaderSettings.beVerbose) {
                    e.printStackTrace();
                }
                continue submissionsLoop;
            }

            // TODO: probably should move filtering logic elsewhere(getStructure, maybe)
            if (submission.getStatus().equalsIgnoreCase("Accepted")) {
                currentChallenge.getSubmissions().add(submission);
            }
        }

        challenges.add(currentChallenge);
    }

    // Now dump all data to disk
    try {
        for (HRChallenge currentChallenge : challenges) {
            if (currentChallenge.getSubmissions().isEmpty())
                continue;

            final String sChallengePath = DownloaderSettings.outputDir + "/" + currentChallenge.getSlug();
            final String sSolutionPath = sChallengePath + "/accepted_solutions";
            final String sDescriptionPath = sChallengePath + "/problem_description";

            Files.createDirectories(Paths.get(sDescriptionPath));
            Files.createDirectories(Paths.get(sSolutionPath));

            // FIXME: this should be done the other way
            String plainBody = currentChallenge.getDescriptions().get(0).getBody();
            String sFname;
            if (!plainBody.equals("null")) {
                sFname = sDescriptionPath + "/english.txt";
                if (DownloaderSettings.beVerbose) {
                    System.out.println("Writing to: " + sFname);
                }

                Files.write(Paths.get(sFname), plainBody.getBytes(StandardCharsets.UTF_8.name()));
            }

            String htmlBody = currentChallenge.getDescriptions().get(0).getBodyHTML();
            String temporaryHtmlTemplate = "<html><body>" + htmlBody + "</body></html>";

            sFname = sDescriptionPath + "/english.html";
            if (DownloaderSettings.beVerbose) {
                System.out.println("Writing to: " + sFname);
            }
            Files.write(Paths.get(sFname), temporaryHtmlTemplate.getBytes(StandardCharsets.UTF_8.name()));

            for (HRSubmission submission : currentChallenge.getSubmissions()) {
                sFname = String.format("%s/%d.%s", sSolutionPath, submission.getId(), submission.getLanguage());
                if (DownloaderSettings.beVerbose) {
                    System.out.println("Writing to: " + sFname);
                }

                Files.write(Paths.get(sFname),
                        submission.getSourceCode().getBytes(StandardCharsets.UTF_8.name()));
            }

        }
    } catch (IOException e) {
        System.err.println("Fatal Error: couldn't dump data to disk.");
        System.exit(1);
    }
}

From source file:edu.washington.data.sentimentreebank.StanfordNLPDict.java

public static void main(String args[]) {
    Options options = new Options();
    options.addOption("d", "dict", true, "dictionary file.");
    options.addOption("s", "sentiment", true, "sentiment value file.");

    CommandLineParser parser = new GnuParser();
    try {
        CommandLine line = parser.parse(options, args);
        if (!line.hasOption("dict") && !line.hasOption("sentiment")) {
            HelpFormatter formatter = new HelpFormatter();
            formatter.printHelp("StanfordNLPDict", options);
            return;
        }

        Path dictPath = Paths.get(line.getOptionValue("dict"));
        Path sentimentPath = Paths.get(line.getOptionValue("sentiment"));

        StanfordNLPDict snlp = new StanfordNLPDict(dictPath, sentimentPath);
        String sentence = "take off";
        System.out.printf("sentence [%1$s] %2$s\n", sentence,
                String.valueOf(snlp.getPhraseSentiment(sentence)));

    } catch (ParseException exp) {
        System.err.println("Parsing failed.  Reason: " + exp.getMessage());
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("StanfordNLPDict", options);
    } catch (IOException ex) {
        Logger.getLogger(StanfordNLPDict.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file:SecurityWatch.java

public static void main(String[] args) {

    final Path path = Paths.get("C:/security");
    SecurityWatch watch = new SecurityWatch();

    try {
        watch.watchVideoCamera(path);
    } catch (IOException | InterruptedException ex) {
        System.err.println(ex);
    }

}

From source file:com.datazuul.iiif.presentation.api.ManifestGenerator.java

public static void main(String[] args)
        throws ParseException, JsonProcessingException, IOException, URISyntaxException {
    Options options = new Options();
    options.addOption("d", true, "Absolute file path to the directory containing the image files.");

    CommandLineParser parser = new DefaultParser();
    CommandLine cmd = parser.parse(options, args);

    if (cmd.hasOption("d")) {
        String imageDirectoryPath = cmd.getOptionValue("d");
        Path imageDirectory = Paths.get(imageDirectoryPath);
        final List<Path> files = new ArrayList<>();
        try {
            Files.walkFileTree(imageDirectory, new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    if (!attrs.isDirectory()) {
                        // TODO there must be a more elegant solution for filtering jpeg files...
                        if (file.getFileName().toString().endsWith("jpg")) {
                            files.add(file);
                        }
                    }
                    return FileVisitResult.CONTINUE;
                }
            });
        } catch (IOException e) {
            e.printStackTrace();
        }
        Collections.sort(files, new Comparator<Path>() {
            @Override
            public int compare(Path fileOne, Path fileTwo) {
                String filename1 = fileOne.getFileName().toString();
                String filename2 = fileTwo.getFileName().toString();

                try {
                    // numerical sorting
                    Integer number1 = Integer.parseInt(filename1.substring(0, filename1.lastIndexOf(".")));
                    Integer number2 = Integer.parseInt(filename2.substring(0, filename2.lastIndexOf(".")));
                    return number1.compareTo(number2);
                } catch (NumberFormatException nfe) {
                    // alpha-numerical sorting
                    return filename1.compareToIgnoreCase(filename2);
                }
            }
        });

        generateManifest(imageDirectory.getFileName().toString(), files);
    } else {
        // automatically generate the help statement
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("ManifestGenerator", options);
    }
}

From source file:com.thesmartweb.swebrank.Main.java

/**
 * @param args the command line arguments
 */
public static void main(String[] args) {
    Path input_path = Paths.get("//mnt//var//DBs//inputsL10//nba//");//input directory
    String output_parent_directory = "//mnt//var//DBs//outputsConfL10//nba//";//output directory
    String config_path = "//mnt//var//DBs//config//";//config directory
    //---Disable apache log manually----
    //System.setProperty("org.apache.commons.logging.Log","org.apache.commons.logging.impl.NoOpLog");
    System.setProperty("org.apache.commons.logging.Log", "org.apache.commons.logging.impl.Log4JLogger");
    //--------------Domain that is searched----------
    String domain = "";
    //------------------search engine related options----------------------
    List<String> queries = null;
    int results_number = 0;//the number of results that are returned from each search engine
    List<Boolean> enginechoice = null;
    //list element #0. True/False Bing
    //list element #1. True/False Google
    //list element #2. True/False Yahoo!
    //list element #3. True/False Merged
    //-----------Moz options---------------------
    List<Boolean> mozMetrics = null;
    //The list is going to contain the Moz related input in the following order
    //list element #0. True/False, true if we use the Moz API, false if not
    //list element #1. True if we use Domain Authority
    //list element #2. True if we use External MozRank
    //list element #3. True if we use MozRank
    //list element #4. True if we use MozTrust
    //list element #5. True if we use Subdomain MozRank
    //list element #6. True if we use Page Authority
    //only one is used (the first to be set to true)
    boolean moz_threshold_option = false;//set to true we use the threshold
    Double moz_threshold = 0.0;//if we want to have a threshold in moz
    int top_count_moz = 0;//if we want to get the moz top-something results
    //---------------Semantic Analysis method----------------
    List<Boolean> ContentSemantics = null;
    int SensebotConcepts = 0;//define the amount of concepts that sensebot is going to recognize
    List<Double> SWebRankSettings = null;
    //------(string)directory is going to be used later-----
    String output_child_directory;
    //-------we get all the paths of the txt (input) files from the input directory-------
    DataManipulation getfiles = new DataManipulation();//class responsible for the extraction of paths
    Collection<File> inputs_files;//array to include the paths of the txt files
    inputs_files = getfiles.getinputfiles(input_path.toString(), "txt");//method to retrieve all the paths of the input documents
    //------------read the txt files------------
    for (File input : inputs_files) {
        ReadInput ri = new ReadInput();//function to read the input
        boolean check_reading_input = ri.perform(input);
        if (check_reading_input) {
            domain = ri.domain;
            //----------
            queries = ri.queries;
            results_number = ri.results_number;
            enginechoice = ri.enginechoice;
            //------------
            mozMetrics = ri.mozMetrics;
            moz_threshold_option = ri.moz_threshold_option;
            moz_threshold = ri.moz_threshold.doubleValue();
            //---------------
            ContentSemantics = ri.ContentSemantics;
            SWebRankSettings = ri.SWebRankSettings;
        }
        int top_visible = 0;//option to set the amount of results you can get in the merged search engine
        //------if we choose to use a Moz metric or Visibility score for our ranking, we need to set the results_number for the search engines to its max which is 50 
        //-----we set the top results number for moz or Visibility rank----
        if (mozMetrics.get(0) || enginechoice.get(3)) {
            if (mozMetrics.get(0)) {
                top_count_moz = results_number;
            } //if moz is true, top_count_moz gets the value of result number
            if (enginechoice.get(3)) {
                top_visible = results_number;
            } //if merged engine is true, top_visible gets the value of result number
            results_number = 50;//this is the max amount of results that you can get from the search engine APIs
        }
        //-----if we want to use Moz we should check first if it works
        if (mozMetrics.get(0)) {
            Moz Moz = new Moz();
            //---if it works, moz remains true, otherwise it is set to false
            mozMetrics.add(0, Moz.check(config_path));
            //if it is false and we have chosen to use Visibility score with Moz, we reset back to the standard settings (ranking and not merged)
            //therefore, we reset the number of results from 50 to the top_count_moz which contained the original number of results
            if (!mozMetrics.get(0)) {
                if (!enginechoice.get(3)) {
                    results_number = top_count_moz;
                }
            }
        }
        //----------we set the wordLists that we are going to use---------------------
        List<String> finalList = new ArrayList<String>();//finalList is going to contain all the content in the end
        Total_analysis ta = new Total_analysis();//we call total analysis
        int iteration_counter = 0;//the iteration_counter is used in order to count the number of iterations of the algorithm and to be checked with perf_limit
        //this list of arraylists is going to contain all the wordLists that are produced for every term of the String[] query,
        //in order to calculate the NGD scores between every term of the wordList and the term that was used as query in order to produce the specific wordList
        List<ArrayList<String>> array_wordLists = new ArrayList<>();
        List<String> wordList_previous = new ArrayList<>();
        List<String> wordList_new = new ArrayList<>();
        double convergence = 0;//we create the convergence percentage and initialize it
        String conv_percentages = "";//string that contains all the convergence percentages
        DataManipulation wordsmanipulation = new DataManipulation();//method to manipulate various word data (String, list<String>, etc)
        do { //if we run the algorithm for the 1st time we already have the query so we skip the loop below that produces the new array of query
            if (iteration_counter != 0) {
                wordList_previous = wordList_new;
                //we add the previous wordList to the finalList
                finalList = wordsmanipulation.AddAList(wordList_previous, finalList);
                List<String> query_new_list_total = new ArrayList<>();
                int iteration_previous = iteration_counter - 1;
                Combinations_Engine cn = new Combinations_Engine();//call the class to combine the terms produced
                for (String query : queries) {
                    List<String> ids = new ArrayList<>();
                    if (enginechoice.get(0)) {
                        String id = domain + "/" + query + "/bing" + "/" + iteration_previous;
                        ids.add(id);
                    }
                    if (enginechoice.get(1)) {
                        String id = domain + "/" + query + "/google" + "/" + iteration_previous;
                        ids.add(id);
                    }
                    if (enginechoice.get(2)) {
                        String id = domain + "/" + query + "/yahoo" + "/" + iteration_previous;
                        ids.add(id);
                    }
                    ElasticGetWordList ESget = new ElasticGetWordList();//we call this class to get the wordlist from the Elastic Search
                    List<String> maxWords = ESget.getMaxWords(ids, SWebRankSettings.get(9).intValue(),
                            config_path);//we are going to get a max amount of words
                    int query_index = queries.indexOf(query);
                    int size_query_new = SWebRankSettings.get(10).intValue();//the amount of new queries we are willing to create
                    //we create the new queries for every query of the previous round by combining the words produced from this query
                    List<String> query_new_list = cn.perform(maxWords, SWebRankSettings.get(7), queries,
                            SWebRankSettings.get(6), query_index, size_query_new, config_path);
                    //we add the list of new queries to the total list that contains all the new queries
                    query_new_list_total.addAll(query_new_list);
                    System.out.println("query pointer=" + query_index + "");
                }
                //---------------------the following cleans a list from null and duplicates
                query_new_list_total = wordsmanipulation.clearListString(query_new_list_total);
                //--------------we create the new directory that our files are going to be saved 
                String txt_directory = FilenameUtils.getBaseName(input.getName());
                output_child_directory = output_parent_directory + txt_directory + "_level_" + iteration_counter
                        + "//";
                //----------------append the wordlist to a file------------------
                wordsmanipulation.AppendWordList(query_new_list_total,
                        output_child_directory + "queries_" + iteration_counter + ".txt");
                if (query_new_list_total.size() < 1) {
                    break;
                } //if we don't create new queries we end the while loop
                //total analysis' function is going to do all the work and return back what we need
                ta = new Total_analysis();
                ta.perform(wordList_previous, iteration_counter, output_child_directory, domain, enginechoice,
                        query_new_list_total, results_number, top_visible, mozMetrics, moz_threshold_option,
                        moz_threshold.doubleValue(), top_count_moz, ContentSemantics, SensebotConcepts,
                        SWebRankSettings, config_path);
                //we get the array of wordlists
                array_wordLists = ta.getarray_wordLists();
                //get the wordlist that includes all the new queries
                wordList_new = ta.getwordList_total();
                //---------------------the following cleans a list from null and duplicates-------------
                wordList_new = wordsmanipulation.clearListString(wordList_new);
                //----------------append the wordlist to a file--------------------
                wordsmanipulation.AppendWordList(wordList_new, output_child_directory + "wordList.txt");
                //the convergence percentage of this iteration
                convergence = ta.getConvergence();//we are going to use convergence score to check the convergence
                //a string that contains all the convergence percentage for each round separated by \n character
                conv_percentages = conv_percentages + "\n" + convergence;
                //a file that is going to include the convergence percentages
                wordsmanipulation.AppendString(conv_percentages,
                        output_child_directory + "convergence_percentage.txt");
                //we add the new wordList to the finalList
                finalList = wordsmanipulation.AddAList(wordList_new, finalList);
                //we set the query array to be equal to the query new total that we have created
                queries = query_new_list_total;
                //we increment the iteration_counter in order to count the iterations of the algorithm and to use the perf_limit
                iteration_counter++;
            } else {//the following source code is performed on the 1st run of the loop
                    //------------we extract the parent path of the file 
                String txt_directory = FilenameUtils.getBaseName(input.getName());
                //----------we create a string that is going to be used for the corresponding directory of outputs
                output_child_directory = output_parent_directory + txt_directory + "_level_" + iteration_counter
                        + "//";
                //we call total analysis function performOld
                ta.perform(wordList_new, iteration_counter, output_child_directory, domain, enginechoice,
                        queries, results_number, top_visible, mozMetrics, moz_threshold_option,
                        moz_threshold.doubleValue(), top_count_moz, ContentSemantics, SensebotConcepts,
                        SWebRankSettings, config_path);
                //we get the array of wordlists
                array_wordLists = ta.getarray_wordLists();
                //get the wordlist that includes all the new queries
                wordList_new = ta.getwordList_total();
                //---------------------the following cleans a list from null and duplicates
                wordList_new = wordsmanipulation.clearListString(wordList_new);
                //----------------append the wordlist to a file
                wordsmanipulation.AppendWordList(wordList_new, output_child_directory + "wordList.txt");
                //-----------------------------------------
                iteration_counter++;//increase the iteration_counter that counts the iterations of the algorithm
            }
        } while (convergence < SWebRankSettings.get(5).doubleValue()
                && iteration_counter < SWebRankSettings.get(8).intValue());//while the convergence percentage is below the limit and the iteration_counter below the performance limit
        if (iteration_counter == 1) {
            finalList = wordsmanipulation.AddAList(wordList_new, finalList);
        }
        //--------------------content List----------------
        if (!finalList.isEmpty()) {
            //---------------------the following cleans the final list from null and duplicates
            finalList = wordsmanipulation.clearListString(finalList);
            //write the keywords to a file
            boolean flag_file = false;//boolean flag to declare successful write to file
            flag_file = wordsmanipulation.AppendWordList(finalList,
                    output_parent_directory + "total_content.txt");
            if (!flag_file) {
                System.out.print("can not create the content file for: " + output_parent_directory
                        + "total_content.txt");
            }
        }
        //we are going to save the total content with its convergence on the Elasticsearch cluster in a separate index
        //Node node = nodeBuilder().client(true).clusterName("lshrankldacluster").node();
        //Client client = node.client();
        //get the elastic search indexes in a list
        List<String> elasticIndexes = ri.GetKeyFile(config_path, "elasticSearchIndexes");
        Settings settings = ImmutableSettings.settingsBuilder().put("cluster.name", "lshrankldacluster")
                .build();
        Client client = new TransportClient(settings)
                .addTransportAddress(new InetSocketTransportAddress("localhost", 9300));
        JSONObject objEngineLevel = new JSONObject();
        objEngineLevel.put("TotalContent", finalList);//we save the total content
        objEngineLevel.put("Convergences", conv_percentages);//we save the convergence percentages
        IndexRequest indexReq = new IndexRequest(elasticIndexes.get(0), "content", domain);//we save also the domain 
        indexReq.source(objEngineLevel);
        IndexResponse indexRes = client.index(indexReq).actionGet();
        //node.close();
        client.close();
        //----------------------convergence percentages writing to file---------------
        //use the conv_percentages string
        if (conv_percentages.length() != 0) {
            boolean flag_file = false;//boolean flag to declare successful write to file
            flag_file = wordsmanipulation.AppendString(conv_percentages,
                    output_parent_directory + "convergence_percentages.txt");
            if (!flag_file) {
                System.out.print("can not create the convergence file for: " + output_parent_directory
                        + "convergence_percentages.txt");
            }
        }
    }
}