Example usage for java.io.File setWritable

List of usage examples for java.io.File setWritable

Introduction

On this page you can find example usage of java.io.File setWritable.

Prototype

public boolean setWritable(boolean writable, boolean ownerOnly) 

Document

Sets the owner's or everybody's write permission for this abstract pathname. The method returns true if and only if the operation succeeded; it fails if the user does not have permission to change the access permissions of the file.
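
Before the project examples below, here is a minimal, self-contained sketch of the two-argument overload (the temp-file name and printed messages are illustrative, not taken from any of the projects below): passing ownerOnly = false applies the change to everybody's write permission, and the boolean return value reports whether the underlying file system accepted the change. As several of the examples note, revoking write permission on a directory this way has no effect on Windows, which is why those tests skip that platform.

import java.io.File;
import java.io.IOException;

public class SetWritableExample {
    public static void main(String[] args) throws IOException {
        // Create a scratch file to experiment with (illustrative name).
        File file = File.createTempFile("setWritableExample", ".tmp");

        // Revoke the write permission for everybody (ownerOnly = false).
        // The return value tells us whether the file system accepted the change.
        boolean revoked = file.setWritable(false, false);
        System.out.println("revoked=" + revoked + ", canWrite=" + file.canWrite());

        // Restore the write permission before cleaning up.
        boolean restored = file.setWritable(true, false);
        System.out.println("restored=" + restored);

        //noinspection ResultOfMethodCallIgnored
        file.delete();
    }
}

The same pattern appears in the tests below: write permission is revoked to provoke a failure, then restored in a finally block so the temporary directory can still be deleted.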

Usage

From source file: org.apache.flink.runtime.blob.BlobCachePutTest.java

/**
 * Uploads a byte array to a server which cannot create incoming files via the {@link
 * BlobCacheService}. File transfers should fail.
 *
 * @param jobId
 *       job id
 * @param blobType
 *       whether the BLOB should become permanent or transient
 */
private void testPutBufferFailsIncoming(@Nullable final JobID jobId, BlobKey.BlobType blobType)
        throws IOException {
    assumeTrue(!OperatingSystem.isWindows()); //setWritable doesn't work on Windows.

    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    File tempFileDir = null;
    try (BlobServer server = new BlobServer(config, new VoidBlobStore());
            BlobCacheService cache = new BlobCacheService(config, new VoidBlobStore(),
                    new InetSocketAddress("localhost", server.getPort()))) {

        server.start();

        // make sure the blob server cannot create any files in its storage dir
        tempFileDir = server.createTemporaryFilename().getParentFile();
        assertTrue(tempFileDir.setExecutable(true, false));
        assertTrue(tempFileDir.setReadable(true, false));
        assertTrue(tempFileDir.setWritable(false, false));

        byte[] data = new byte[2000000];
        rnd.nextBytes(data);

        // upload the file to the server via the cache
        exception.expect(IOException.class);
        exception.expectMessage("PUT operation failed: ");

        try {
            put(cache, jobId, data, blobType);
        } finally {
            File storageDir = tempFileDir.getParentFile();
            // only the incoming directory should exist (no job directory!)
            assertArrayEquals(new String[] { "incoming" }, storageDir.list());
        }
    } finally {
        // set writable again to make sure we can remove the directory
        if (tempFileDir != null) {
            //noinspection ResultOfMethodCallIgnored
            tempFileDir.setWritable(true, false);
        }
    }
}

From source file: org.apache.flink.runtime.blob.BlobCachePutTest.java

/**
 * Uploads a byte array to a server which cannot create any files via the {@link
 * BlobCacheService}. File transfers should fail.
 *
 * @param jobId
 *       job id
 * @param blobType
 *       whether the BLOB should become permanent or transient
 */
private void testPutBufferFails(@Nullable final JobID jobId, BlobKey.BlobType blobType) throws IOException {
    assumeTrue(!OperatingSystem.isWindows()); //setWritable doesn't work on Windows.

    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    File tempFileDir = null;
    try (BlobServer server = new BlobServer(config, new VoidBlobStore());
            BlobCacheService cache = new BlobCacheService(config, new VoidBlobStore(),
                    new InetSocketAddress("localhost", server.getPort()))) {

        server.start();

        // make sure the blob server cannot create any files in its storage dir
        tempFileDir = server.createTemporaryFilename().getParentFile().getParentFile();
        assertTrue(tempFileDir.setExecutable(true, false));
        assertTrue(tempFileDir.setReadable(true, false));
        assertTrue(tempFileDir.setWritable(false, false));

        byte[] data = new byte[2000000];
        rnd.nextBytes(data);

        // upload the file to the server via the cache
        exception.expect(IOException.class);
        exception.expectMessage("PUT operation failed: ");

        put(cache, jobId, data, blobType);

    } finally {
        // set writable again to make sure we can remove the directory
        if (tempFileDir != null) {
            //noinspection ResultOfMethodCallIgnored
            tempFileDir.setWritable(true, false);
        }
    }
}

From source file: org.apache.flink.runtime.blob.BlobCachePutTest.java

/**
 * Uploads a byte array to a server which cannot create files via the {@link BlobCacheService}.
 * File transfers should fail.
 *
 * @param jobId
 *       job id
 * @param blobType
 *       whether the BLOB should become permanent or transient
 */
private void testPutBufferFailsStore(@Nullable final JobID jobId, BlobKey.BlobType blobType)
        throws IOException {
    assumeTrue(!OperatingSystem.isWindows()); //setWritable doesn't work on Windows.

    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    File jobStoreDir = null;
    try (BlobServer server = new BlobServer(config, new VoidBlobStore());
            BlobCacheService cache = new BlobCacheService(config, new VoidBlobStore(),
                    new InetSocketAddress("localhost", server.getPort()))) {

        server.start();

        // make sure the blob server cannot create any files in its storage dir
        jobStoreDir = server.getStorageLocation(jobId, BlobKey.createKey(blobType)).getParentFile();
        assertTrue(jobStoreDir.setExecutable(true, false));
        assertTrue(jobStoreDir.setReadable(true, false));
        assertTrue(jobStoreDir.setWritable(false, false));

        byte[] data = new byte[2000000];
        rnd.nextBytes(data);

        // upload the file to the server via the cache
        exception.expect(IOException.class);
        exception.expectMessage("PUT operation failed: ");

        try {
            put(cache, jobId, data, blobType);
        } finally {
            // there should be no remaining incoming files
            File incomingFileDir = new File(jobStoreDir.getParent(), "incoming");
            assertArrayEquals(new String[] {}, incomingFileDir.list());

            // there should be no files in the job directory
            assertArrayEquals(new String[] {}, jobStoreDir.list());
        }
    } finally {
        // set writable again to make sure we can remove the directory
        if (jobStoreDir != null) {
            //noinspection ResultOfMethodCallIgnored
            jobStoreDir.setWritable(true, false);
        }
    }
}

From source file: skoa.helpers.ConfiguracionGraficas.java

private void inicial() {
    Date fechaActual = new Date();
    NombreCarpetaActual = formatoDelTextoCarpeta.format(fechaActual);
    //For each query, a folder named with the date and time the query was run is created,
    //to store all of that query's results and charts.
    String os = System.getProperty("os.name");
    //On Windows the path separator is \ and on Linux it is /.
    if (os.indexOf("Win") >= 0)
        ruta_destino = ruta_jar + "\\Consultas\\";
    else
        ruta_destino = ruta_jar + "/Consultas/";
    ruta_destino = ruta_destino + NombreCarpetaActual;
    File destino_consulta = new File(ruta_destino);
    destino_consulta.setExecutable(true, false);
    destino_consulta.setReadable(true, false);
    destino_consulta.setWritable(true, false);
    //CREATE THE DESTINATION FOLDER FOR THE NEW QUERY.
    if (destino_consulta.mkdir()) {
    } //System.out.println("**DIR CREATED.   "+ruta_destino);
    else { //If the directory already exists, because the queries were run so quickly
           //that their names coincide (the name is the time at which the query was made),
           //we append a number indicating that it is another query at the same time.
        String aux = NombreCarpetaActual.substring(NombreCarpetaActual.indexOf(" ") + 1); //Count from the first space
        if (aux.indexOf(" ") != -1) {
            aux = aux.substring(aux.length() - 1); //Take the last character.
            int naux = Integer.parseInt(aux); //Parse it as an integer
            naux++; //Increment it by 1
            NombreCarpetaActual = NombreCarpetaActual + " " + naux;
        } else
            NombreCarpetaActual = NombreCarpetaActual + " 1";
        if (os.indexOf("Win") >= 0)
            ruta_destino = ruta_jar + "\\Consultas\\";
        else
            ruta_destino = ruta_jar + "/Consultas/";
        //ruta_destino=ruta_jar+"\\Consultas\\";
        ruta_destino = ruta_destino + NombreCarpetaActual;
        destino_consulta = new File(ruta_destino);
        destino_consulta.setExecutable(true, false);
        destino_consulta.setReadable(true, false);
        destino_consulta.setWritable(true, false);
        destino_consulta.mkdir();
    }
    interfaz.getContentPane().setLayout(new BorderLayout());
    panel.add(opciones); //This option is not added in cargarVista() so that it can be kept general and always used.
    panel.add(opciones2); //This option is not added in cargarVista() so that it can be kept general and always used.
    cargarVista();
    interfaz.getContentPane().add(oeste, "West");
    interfaz.getContentPane().add(centro, "Center");
    interfaz.setVisible(true);
}

From source file: org.elasticsearch.hadoop.integration.hive.HiveEmbeddedServer.java

private HiveConf configure() throws Exception {
    String scratchDir = NTFSLocalFileSystem.SCRATCH_DIR;

    File scratchDirFile = new File(scratchDir);
    TestUtils.delete(scratchDirFile);

    Configuration cfg = new Configuration();
    HiveConf conf = new HiveConf(cfg, HiveConf.class);
    conf.addToRestrictList("columns.comments");
    refreshConfig(conf);

    HdpBootstrap.hackHadoopStagingOnWin();

    // work-around for NTFS FS
    // set permissive permissions since otherwise, on some OS it fails
    if (TestUtils.isWindows()) {
        conf.set("fs.file.impl", NTFSLocalFileSystem.class.getName());
        conf.set("hive.scratch.dir.permission", "650");
        conf.setVar(ConfVars.SCRATCHDIRPERMISSION, "650");
        conf.set("hive.server2.enable.doAs", "false");
        conf.set("hive.execution.engine", "mr");
        //conf.set("hadoop.bin.path", getClass().getClassLoader().getResource("hadoop.cmd").getPath());
        System.setProperty("path.separator", ";");
        conf.setVar(HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
                DummyHiveAuthenticationProvider.class.getName());
    } else {
        conf.set("hive.scratch.dir.permission", "777");
        conf.setVar(ConfVars.SCRATCHDIRPERMISSION, "777");
        scratchDirFile.mkdirs();
        // also set the permissions manually since Hive doesn't do it...
        scratchDirFile.setWritable(true, false);
    }

    int random = new Random().nextInt();

    conf.set("hive.metastore.warehouse.dir", scratchDir + "/warehouse" + random);
    conf.set("hive.metastore.metadb.dir", scratchDir + "/metastore_db" + random);
    conf.set("hive.exec.scratchdir", scratchDir);
    conf.set("fs.permissions.umask-mode", "022");
    conf.set("javax.jdo.option.ConnectionURL",
            "jdbc:derby:;databaseName=" + scratchDir + "/metastore_db" + random + ";create=true");
    conf.set("hive.metastore.local", "true");
    conf.set("hive.aux.jars.path", "");
    conf.set("hive.added.jars.path", "");
    conf.set("hive.added.files.path", "");
    conf.set("hive.added.archives.path", "");
    conf.set("fs.default.name", "file:///");

    // clear mapred.job.tracker - Hadoop defaults to 'local' if not defined. Hive however expects this to be set to 'local' - if it's not, it does a remote execution (i.e. no child JVM)
    Field field = Configuration.class.getDeclaredField("properties");
    field.setAccessible(true);
    Properties props = (Properties) field.get(conf);
    props.remove("mapred.job.tracker");
    props.remove("mapreduce.framework.name");
    props.setProperty("fs.default.name", "file:///");

    // intercept SessionState to clean the threadlocal
    Field tss = SessionState.class.getDeclaredField("tss");
    tss.setAccessible(true);
    tss.set(null, new InterceptingThreadLocal());

    return new HiveConf(conf);
}

From source file: org.apache.flink.runtime.blob.BlobCacheGetTest.java

/**
 * Retrieves a BLOB via a {@link BlobCacheService} which cannot create incoming files. File
 * transfers should fail.
 *
 * @param jobId
 *       job id
 * @param blobType
 *       whether the BLOB should become permanent or transient
 */
private void testGetFailsIncoming(@Nullable final JobID jobId, BlobKey.BlobType blobType) throws IOException {
    assumeTrue(!OperatingSystem.isWindows()); //setWritable doesn't work on Windows.

    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    File tempFileDir = null;
    try (BlobServer server = new BlobServer(config, new VoidBlobStore());
            BlobCacheService cache = new BlobCacheService(config, new VoidBlobStore(),
                    new InetSocketAddress("localhost", server.getPort()))) {

        server.start();

        // store the data on the server
        byte[] data = new byte[2000000];
        rnd.nextBytes(data);
        BlobKey blobKey = put(server, jobId, data, blobType);
        verifyType(blobType, blobKey);

        // make sure the blob cache cannot create any files in its storage dir
        if (blobType == PERMANENT_BLOB) {
            tempFileDir = cache.getPermanentBlobService().createTemporaryFilename().getParentFile();
        } else {
            tempFileDir = cache.getTransientBlobService().createTemporaryFilename().getParentFile();
        }
        assertTrue(tempFileDir.setExecutable(true, false));
        assertTrue(tempFileDir.setReadable(true, false));
        assertTrue(tempFileDir.setWritable(false, false));

        // request the file from the server via the cache
        exception.expect(IOException.class);
        exception.expectMessage("Failed to fetch BLOB ");

        try {
            get(cache, jobId, blobKey);
        } finally {
            HashSet<String> expectedDirs = new HashSet<>();
            expectedDirs.add("incoming");
            if (jobId != null) {
                // only the incoming and the job directory should exist
                expectedDirs.add(JOB_DIR_PREFIX + jobId);
                File storageDir = tempFileDir.getParentFile();
                String[] actualDirs = storageDir.list();
                assertNotNull(actualDirs);
                assertEquals(expectedDirs, new HashSet<>(Arrays.asList(actualDirs)));

                // job directory should be empty
                File jobDir = new File(tempFileDir.getParentFile(), JOB_DIR_PREFIX + jobId);
                assertArrayEquals(new String[] {}, jobDir.list());
            } else {
                // only the incoming and no_job directory should exist (no job directory!)
                expectedDirs.add(NO_JOB_DIR_PREFIX);
                File storageDir = tempFileDir.getParentFile();
                String[] actualDirs = storageDir.list();
                assertNotNull(actualDirs);
                assertEquals(expectedDirs, new HashSet<>(Arrays.asList(actualDirs)));

                // no_job directory should be empty
                File noJobDir = new File(tempFileDir.getParentFile(), NO_JOB_DIR_PREFIX);
                assertArrayEquals(new String[] {}, noJobDir.list());
            }

            // file should still be there on the server (even if transient)
            assertTrue(server.getStorageLocation(jobId, blobKey).exists());
        }
    } finally {
        // set writable again to make sure we can remove the directory
        if (tempFileDir != null) {
            //noinspection ResultOfMethodCallIgnored
            tempFileDir.setWritable(true, false);
        }
    }
}

From source file: org.elasticsearch.hadoop.integration.hive.HiveEmbeddedServer2.java

private HiveConf configure() throws Exception {
    String scratchDir = NTFSLocalFileSystem.SCRATCH_DIR;

    File scratchDirFile = new File(scratchDir);
    TestUtils.delete(scratchDirFile);

    Configuration cfg = new Configuration();
    HiveConf conf = new HiveConf(cfg, HiveConf.class);
    conf.addToRestrictList("columns.comments");
    refreshConfig(conf);

    HdpBootstrap.hackHadoopStagingOnWin();

    // work-around for NTFS FS
    // set permissive permissions since otherwise, on some OS it fails
    if (TestUtils.isWindows()) {
        conf.set("fs.file.impl", NTFSLocalFileSystem.class.getName());
        conf.set("hive.scratch.dir.permission", "650");
        conf.setVar(ConfVars.SCRATCHDIRPERMISSION, "650");
        conf.set("hive.server2.enable.doAs", "false");
        conf.set("hive.execution.engine", "mr");
        //conf.set("hadoop.bin.path", getClass().getClassLoader().getResource("hadoop.cmd").getPath());
        System.setProperty("path.separator", ";");
        conf.setVar(HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
                DummyHiveAuthenticationProvider.class.getName());
    } else {
        conf.set("hive.scratch.dir.permission", "777");
        conf.setVar(ConfVars.SCRATCHDIRPERMISSION, "777");
        scratchDirFile.mkdirs();
        // also set the permissions manually since Hive doesn't do it...
        scratchDirFile.setWritable(true, false);
    }

    int random = new Random().nextInt();

    conf.set("hive.metastore.warehouse.dir", scratchDir + "/warehouse" + random);
    conf.set("hive.metastore.metadb.dir", scratchDir + "/metastore_db" + random);
    conf.set("hive.exec.scratchdir", scratchDir);
    conf.set("fs.permissions.umask-mode", "022");
    conf.set("javax.jdo.option.ConnectionURL",
            "jdbc:derby:;databaseName=" + scratchDir + "/metastore_db" + random + ";create=true");
    conf.set("hive.metastore.local", "true");
    conf.set("hive.aux.jars.path", "");
    conf.set("hive.added.jars.path", "");
    conf.set("hive.added.files.path", "");
    conf.set("hive.added.archives.path", "");
    conf.set("fs.default.name", "file:///");

    // clear mapred.job.tracker - Hadoop defaults to 'local' if not defined. Hive however expects this to be set to 'local' - if it's not, it does a remote execution (i.e. no child JVM)
    Field field = Configuration.class.getDeclaredField("properties");
    field.setAccessible(true);
    Properties props = (Properties) field.get(conf);
    props.remove("mapred.job.tracker");
    props.remove("mapreduce.framework.name");
    props.setProperty("fs.default.name", "file:///");

    // intercept SessionState to clean the threadlocal
    Field tss = SessionState.class.getDeclaredField("tss");
    tss.setAccessible(true);
    //tss.set(null, new InterceptingThreadLocal());

    return new HiveConf(conf);
}

From source file: org.apache.flink.runtime.blob.BlobCacheGetTest.java

/**
 * Retrieves a BLOB via a {@link BlobCacheService} which cannot create the final storage file.
 * File transfers should fail.
 *
 * @param jobId
 *       job id
 * @param blobType
 *       whether the BLOB should become permanent or transient
 */
private void testGetFailsStore(@Nullable final JobID jobId, BlobKey.BlobType blobType)
        throws IOException, InterruptedException {
    assumeTrue(!OperatingSystem.isWindows()); //setWritable doesn't work on Windows.

    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    File jobStoreDir = null;
    try (BlobServer server = new BlobServer(config, new VoidBlobStore());
            BlobCacheService cache = new BlobCacheService(config, new VoidBlobStore(),
                    new InetSocketAddress("localhost", server.getPort()))) {

        server.start();

        // store the data on the server
        byte[] data = new byte[2000000];
        rnd.nextBytes(data);
        BlobKey blobKey = put(server, jobId, data, blobType);
        verifyType(blobType, blobKey);

        // make sure the blob cache cannot create any files in its storage dir
        if (blobType == PERMANENT_BLOB) {
            jobStoreDir = cache.getPermanentBlobService().getStorageLocation(jobId, new PermanentBlobKey())
                    .getParentFile();
        } else {
            jobStoreDir = cache.getTransientBlobService().getStorageLocation(jobId, new TransientBlobKey())
                    .getParentFile();
        }
        assertTrue(jobStoreDir.setExecutable(true, false));
        assertTrue(jobStoreDir.setReadable(true, false));
        assertTrue(jobStoreDir.setWritable(false, false));

        // request the file from the server via the cache
        exception.expect(AccessDeniedException.class);

        try {
            get(cache, jobId, blobKey);
        } finally {
            // there should be no remaining incoming files
            File incomingFileDir = new File(jobStoreDir.getParent(), "incoming");
            assertArrayEquals(new String[] {}, incomingFileDir.list());

            // there should be no files in the job directory
            assertArrayEquals(new String[] {}, jobStoreDir.list());

            // if transient, the get will fail but since the download was successful, the file
            // will not be on the server anymore
            if (blobType == TRANSIENT_BLOB) {
                verifyDeletedEventually(server, jobId, blobKey);
            } else {
                assertTrue(server.getStorageLocation(jobId, blobKey).exists());
            }
        }
    } finally {
        // set writable again to make sure we can remove the directory
        if (jobStoreDir != null) {
            //noinspection ResultOfMethodCallIgnored
            jobStoreDir.setWritable(true, false);
        }
    }
}

From source file: net.nicholaswilliams.java.licensing.licensor.interfaces.cli.TestConsoleRSAKeyPairGenerator.java

@Test
@Ignore("canRead()/canWrite() do not work on Win; setReadable()/setWritable() do not work on some Macs.")
public void testCheckAndPromptToOverwriteFile08() throws IOException {
    File file = new File("testCheckAndPromptToOverwriteFile08");
    file = file.getCanonicalFile();
    FileUtils.writeStringToFile(file, "test string");

    assertTrue("Setting the file writable flag to false should have succeeded.",
            file.setWritable(false, false));
    assertTrue("The file should still be readable.", file.canRead());
    assertFalse("The file should not be writable.", file.canWrite());

    try {
        this.device.printErrLn(
                "The file " + file.getCanonicalPath() + " already exists and cannot be overwritten.");
        EasyMock.expectLastCall();

        EasyMock.replay(this.generator, this.device);

        assertFalse("The value returned should be false.",
                this.console.checkAndPromptToOverwriteFile("testCheckAndPromptToOverwriteFile08"));
    } finally {
        FileUtils.forceDelete(file);
    }
}

From source file: com.redsqirl.workflow.server.ActionManager.java

/**
 * Lists the canonical class names of all the classes extending DataflowAction.
 *
 * If possible, the classes will be read from a file. If not, a file will be written for next time.
 * 
 * @see com.idiro.BlockManager#getNonAbstractClassesFromSuperClass(String)
 * @return The classes that extends DataflowAction
 */
private List<String> getDataflowActionClasses() {
    File dataFlowActionClassFile = new File(WorkflowPrefManager.getPathDataFlowActionClasses());
    List<String> dataFlowActionClassName = new LinkedList<String>();
    if (dataFlowActionClassFile.exists()) {

        //logger.info("getDataflowActionClasses exist");

        try {
            BufferedReader br = new BufferedReader(new FileReader(dataFlowActionClassFile));
            String line = null;
            while ((line = br.readLine()) != null) {
                dataFlowActionClassName.add(line);
            }
            br.close();
        } catch (Exception e) {
            logger.error("Error while reading class file", e);
            dataFlowActionClassFile.delete();
        }
    }

    if (!dataFlowActionClassFile.exists()) {

        //logger.info("getDataflowActionClasses not exist");

        dataFlowActionClassName = WorkflowPrefManager.getInstance()
                .getNonAbstractClassesFromSuperClass(DataflowAction.class.getCanonicalName());
        try {
            BufferedWriter bw = new BufferedWriter(new FileWriter(dataFlowActionClassFile));
            Iterator<String> dataoutputClassNameIt = dataFlowActionClassName.iterator();
            while (dataoutputClassNameIt.hasNext()) {
                bw.write(dataoutputClassNameIt.next());
                bw.newLine();
            }
            bw.close();
            //Make the file readable and writable by everyone
            dataFlowActionClassFile.setReadable(true, false);
            dataFlowActionClassFile.setWritable(true, false);
        } catch (Exception e) {
            logger.error("Error while writing class file", e);
            dataFlowActionClassFile.delete();
        }

    }
    logger.debug("Return data flow classes: " + dataFlowActionClassName);
    return dataFlowActionClassName;
}