Example usage for org.apache.hadoop.io.retry.RetryProxy.create

List of usage examples for org.apache.hadoop.io.retry.RetryProxy.create

Introduction

On this page you can find example usages of org.apache.hadoop.io.retry.RetryProxy.create.

Prototype

public static <T> Object create(Class<T> iface, T implementation,
        Map<String, RetryPolicy> methodNameToPolicyMap) 

Document

Create a proxy for an interface of an implementation class using a set of retry policies specified by method name.
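
A minimal, self-contained sketch of this overload is shown below. The MyStore interface, its method names, and the retry counts are illustrative placeholders rather than code taken from any of the examples that follow.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;

public class RetryProxySketch {

    // Hypothetical store interface; the real examples below proxy interfaces such as
    // FileSystemStore, NativeFileSystemStore and DataPusher.
    public interface MyStore {
        void put(String key, byte[] data) throws IOException;
        byte[] get(String key) throws IOException;
    }

    public static MyStore wrapWithRetries(MyStore store) {
        // Retry a failing call up to 4 times, sleeping 10 seconds between attempts.
        RetryPolicy retryPolicy =
                RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10, TimeUnit.SECONDS);

        // Only "put" gets the retry policy; methods absent from the map are
        // presumably tried once and fail without retrying.
        Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
        methodNameToPolicyMap.put("put", retryPolicy);

        // RetryProxy.create returns a dynamic proxy typed as Object, so callers
        // cast it back to the proxied interface.
        return (MyStore) RetryProxy.create(MyStore.class, store, methodNameToPolicyMap);
    }
}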

Usage

From source file: com.aliyun.fs.oss.blk.OssFileSystem.java

License: Apache License

private static FileSystemStore createDefaultStore(Configuration conf) {
    FileSystemStore store = new JetOssFileSystemStore();

    RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            conf.getInt("fs.oss.maxRetries", 4), conf.getLong("fs.oss.sleepTimeSeconds", 10), TimeUnit.SECONDS);
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = new HashMap<Class<? extends Exception>, RetryPolicy>();
    exceptionToPolicyMap.put(IOException.class, basePolicy);
    exceptionToPolicyMap.put(OssException.class, basePolicy);

    RetryPolicy methodPolicy = RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            exceptionToPolicyMap);
    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
    methodNameToPolicyMap.put("storeBlock", methodPolicy);
    methodNameToPolicyMap.put("retrieveBlock", methodPolicy);

    return (FileSystemStore) RetryProxy.create(FileSystemStore.class, store, methodNameToPolicyMap);
}

From source file: com.aliyun.fs.oss.nat.NativeOssFileSystem.java

License: Apache License

private static NativeFileSystemStore createDefaultStore(Configuration conf) {
    NativeFileSystemStore store = new JetOssNativeFileSystemStore();

    RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            conf.getInt("fs.oss.maxRetries", 4), conf.getLong("fs.oss.sleepTimeSeconds", 10), TimeUnit.SECONDS);
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = new HashMap<Class<? extends Exception>, RetryPolicy>();
    // for reflection invoke.
    exceptionToPolicyMap.put(InvocationTargetException.class, basePolicy);
    exceptionToPolicyMap.put(IOException.class, basePolicy);
    exceptionToPolicyMap.put(OssException.class, basePolicy);

    RetryPolicy methodPolicy = RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            exceptionToPolicyMap);
    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
    methodNameToPolicyMap.put("storeFile", methodPolicy);
    methodNameToPolicyMap.put("storeFiles", methodPolicy);
    methodNameToPolicyMap.put("storeEmptyFile", methodPolicy);
    methodNameToPolicyMap.put("retrieveMetadata", methodPolicy);
    methodNameToPolicyMap.put("retrieve", methodPolicy);
    methodNameToPolicyMap.put("purge", methodPolicy);
    methodNameToPolicyMap.put("dump", methodPolicy);
    methodNameToPolicyMap.put("doesObjectExist", methodPolicy);
    methodNameToPolicyMap.put("copy", methodPolicy);
    methodNameToPolicyMap.put("list", methodPolicy);
    methodNameToPolicyMap.put("delete", methodPolicy);

    return (NativeFileSystemStore) RetryProxy.create(NativeFileSystemStore.class, store, methodNameToPolicyMap);
}

From source file: com.github.hdl.tensorflow.yarn.app.TFApplicationRpcClient.java

License: Apache License

public TFApplicationRpc getRpc() {
    InetSocketAddress address = new InetSocketAddress(serverAddress, serverPort);
    Configuration conf = new Configuration();
    RetryPolicy retryPolicy = RMProxy.createRetryPolicy(conf, false);
    try {
        TensorFlowCluster proxy = RMProxy.createRMProxy(conf, TensorFlowCluster.class, address);
        this.tensorflow = (TensorFlowCluster) RetryProxy.create(TensorFlowCluster.class, proxy, retryPolicy);
        return this;
    } catch (IOException e) {
        return null;
    }
}
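
This example uses the overload of RetryProxy.create that takes a single RetryPolicy applied to every interface method, rather than the methodNameToPolicyMap overload documented above. A hedged sketch of that variant, reusing the placeholder MyStore interface from the earlier sketch:

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;

public class SinglePolicySketch {
    // Reuses the hypothetical MyStore interface from the earlier sketch.
    public static RetryProxySketch.MyStore wrapAllMethods(RetryProxySketch.MyStore store) {
        // One policy applied to every method of the proxied interface.
        RetryPolicy everyMethod =
                RetryPolicies.retryUpToMaximumCountWithFixedSleep(3, 1, TimeUnit.SECONDS);
        return (RetryProxySketch.MyStore) RetryProxy.create(
                RetryProxySketch.MyStore.class, store, everyMethod);
    }
}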

From source file: com.quixey.hadoop.fs.oss.OSSFileSystem.java

License: Apache License

private static FileSystemStore createDefaultStore(Configuration conf) {
    FileSystemStore store = new CloudOSSFileSystemStore();

    RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            conf.getInt(OSS_MAX_RETRIES_PROPERTY, 4), conf.getLong(OSS_SLEEP_TIME_SECONDS_PROPERTY, 10),
            TimeUnit.SECONDS);
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = new HashMap<>();
    exceptionToPolicyMap.put(IOException.class, basePolicy);

    RetryPolicy methodPolicy = RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            exceptionToPolicyMap);
    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<>();
    methodNameToPolicyMap.put("storeFile", methodPolicy);
    methodNameToPolicyMap.put("rename", methodPolicy);

    return (FileSystemStore) RetryProxy.create(FileSystemStore.class, store, methodNameToPolicyMap);
}

From source file: com.sun.grid.herd.HerdLoadSensor.java

License: Open Source License

/**
 * Create a connection to the namenode.
 * @param conf the Hadoop configuration
 * @return a handle to the namenode
 * @throws IOException Thrown if there is an error while communicating
 * with the namenode.
 */
private static NamenodeProtocol createNamenode(Configuration conf) throws IOException {
    InetSocketAddress address = NameNode.getAddress(conf);
    RetryPolicy timeoutPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(3, 200,
            TimeUnit.MILLISECONDS);
    Map<String, RetryPolicy> policyMap = Collections.singletonMap("getBlocks", timeoutPolicy);

    UserGroupInformation info = null;

    try {
        info = UnixUserGroupInformation.login(conf);
    } catch (LoginException e) {
        throw new IOException(StringUtils.stringifyException(e));
    }

    VersionedProtocol proxy = RPC.getProxy(NamenodeProtocol.class, NamenodeProtocol.versionID, address, info,
            conf, NetUtils.getDefaultSocketFactory(conf));
    NamenodeProtocol ret = (NamenodeProtocol) RetryProxy.create(NamenodeProtocol.class, proxy, policyMap);

    return ret;
}

From source file: io.druid.indexer.JobHelper.java

License: Apache License

public static DataSegment serializeOutIndex(final DataSegment segmentTemplate,
        final Configuration configuration, final Progressable progressable, final TaskAttemptID taskAttemptID,
        final File mergedBase, final Path segmentBasePath) throws IOException {
    final FileSystem outputFS = FileSystem.get(segmentBasePath.toUri(), configuration);
    final Path tmpPath = new Path(segmentBasePath, String.format("index.zip.%d", taskAttemptID.getId()));
    final AtomicLong size = new AtomicLong(0L);
    final DataPusher zipPusher = (DataPusher) RetryProxy.create(DataPusher.class, new DataPusher() {
        @Override
        public long push() throws IOException {
            try (OutputStream outputStream = outputFS.create(tmpPath, true, DEFAULT_FS_BUFFER_SIZE,
                    progressable)) {
                size.set(zipAndCopyDir(mergedBase, outputStream, progressable));
                outputStream.flush();
            } catch (IOException | RuntimeException exception) {
                log.error(exception, "Exception in retry loop");
                throw exception;
            }
            return -1;
        }
    }, RetryPolicies.exponentialBackoffRetry(NUM_RETRIES, SECONDS_BETWEEN_RETRIES, TimeUnit.SECONDS));
    zipPusher.push();
    log.info("Zipped %,d bytes to [%s]", size.get(), tmpPath.toUri());

    final Path finalIndexZipFilePath = new Path(segmentBasePath, "index.zip");
    final URI indexOutURI = finalIndexZipFilePath.toUri();
    final ImmutableMap<String, Object> loadSpec;
    // TODO: Make this a part of Pushers or Pullers
    switch (outputFS.getScheme()) {
    case "hdfs":
        loadSpec = ImmutableMap.<String, Object>of("type", "hdfs", "path", indexOutURI.toString());
        break;
    case "s3":
    case "s3n":
        loadSpec = ImmutableMap.<String, Object>of("type", "s3_zip", "bucket", indexOutURI.getHost(), "key",
                indexOutURI.getPath().substring(1) // remove the leading "/"
        );
        break;
    case "file":
        loadSpec = ImmutableMap.<String, Object>of("type", "local", "path", indexOutURI.getPath());
        break;
    default:
        throw new IAE("Unknown file system scheme [%s]", outputFS.getScheme());
    }
    final DataSegment finalSegment = segmentTemplate.withLoadSpec(loadSpec).withSize(size.get())
            .withBinaryVersion(SegmentUtils.getVersionFromDir(mergedBase));

    if (!renameIndexFiles(outputFS, tmpPath, finalIndexZipFilePath)) {
        throw new IOException(String.format("Unable to rename [%s] to [%s]", tmpPath.toUri().toString(),
                finalIndexZipFilePath.toUri().toString()));
    }
    writeSegmentDescriptor(outputFS, finalSegment, new Path(segmentBasePath, "descriptor.json"), progressable);
    return finalSegment;
}
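
The Druid examples on this page wrap anonymous DataPusher implementations in a retry proxy. The DataPusher interface itself is not reproduced on this page; judging from the overridden method, it is presumably a single-method callback along these lines (inferred, not copied from the Druid source):

import java.io.IOException;

// Presumed shape of JobHelper.DataPusher, inferred from the anonymous
// implementations in the surrounding examples.
public interface DataPusher {
    long push() throws IOException;
}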

From source file: io.druid.indexer.JobHelper.java

License: Apache License

public static void writeSegmentDescriptor(final FileSystem outputFS, final DataSegment segment,
        final Path descriptorPath, final Progressable progressable) throws IOException {
    final DataPusher descriptorPusher = (DataPusher) RetryProxy.create(DataPusher.class, new DataPusher() {
        @Override
        public long push() throws IOException {
            try {
                progressable.progress();
                if (outputFS.exists(descriptorPath)) {
                    if (!outputFS.delete(descriptorPath, false)) {
                        throw new IOException(
                                String.format("Failed to delete descriptor at [%s]", descriptorPath));
                    }
                }
                try (final OutputStream descriptorOut = outputFS.create(descriptorPath, true,
                        DEFAULT_FS_BUFFER_SIZE, progressable)) {
                    HadoopDruidIndexerConfig.jsonMapper.writeValue(descriptorOut, segment);
                    descriptorOut.flush();
                }
            } catch (RuntimeException | IOException ex) {
                log.info(ex, "Exception in descriptor pusher retry loop");
                throw ex;
            }
            return -1;
        }
    }, RetryPolicies.exponentialBackoffRetry(NUM_RETRIES, SECONDS_BETWEEN_RETRIES, TimeUnit.SECONDS));
    descriptorPusher.push();
}

From source file: io.druid.indexer.JobHelper.java

License: Apache License

public static long unzipNoGuava(final Path zip, final Configuration configuration, final File outDir,
        final Progressable progressable) throws IOException {
    final DataPusher zipPusher = (DataPusher) RetryProxy.create(DataPusher.class, new DataPusher() {
        @Override
        public long push() throws IOException {
            try {
                final FileSystem fileSystem = zip.getFileSystem(configuration);
                long size = 0L;
                final byte[] buffer = new byte[1 << 13];
                progressable.progress();
                try (ZipInputStream in = new ZipInputStream(fileSystem.open(zip, 1 << 13))) {
                    for (ZipEntry entry = in.getNextEntry(); entry != null; entry = in.getNextEntry()) {
                        final String fileName = entry.getName();
                        try (final OutputStream out = new BufferedOutputStream(
                                new FileOutputStream(outDir.getAbsolutePath() + File.separator + fileName),
                                1 << 13)) {
                            for (int len = in.read(buffer); len >= 0; len = in.read(buffer)) {
                                progressable.progress();
                                if (len == 0) {
                                    continue;
                                }
                                size += len;
                                out.write(buffer, 0, len);
                            }
                            out.flush();
                        }
                    }
                }
                progressable.progress();
                return size;
            } catch (IOException | RuntimeException exception) {
                log.error(exception, "Exception in unzip retry loop");
                throw exception;
            }
        }
    }, RetryPolicies.exponentialBackoffRetry(NUM_RETRIES, SECONDS_BETWEEN_RETRIES, TimeUnit.SECONDS));
    return zipPusher.push();
}

From source file: org.apache.druid.indexer.JobHelper.java

License: Apache License

public static DataSegment serializeOutIndex(final DataSegment segmentTemplate,
        final Configuration configuration, final Progressable progressable, final File mergedBase,
        final Path finalIndexZipFilePath, final Path tmpPath, DataSegmentPusher dataSegmentPusher)
        throws IOException {
    final FileSystem outputFS = FileSystem.get(finalIndexZipFilePath.toUri(), configuration);
    final AtomicLong size = new AtomicLong(0L);
    final DataPusher zipPusher = (DataPusher) RetryProxy.create(DataPusher.class, new DataPusher() {
        @Override
        public long push() throws IOException {
            try (OutputStream outputStream = outputFS.create(tmpPath, true, DEFAULT_FS_BUFFER_SIZE,
                    progressable)) {
                size.set(zipAndCopyDir(mergedBase, outputStream, progressable));
            } catch (IOException | RuntimeException exception) {
                log.error(exception, "Exception in retry loop");
                throw exception;
            }
            return -1;
        }
    }, RetryPolicies.exponentialBackoffRetry(NUM_RETRIES, SECONDS_BETWEEN_RETRIES, TimeUnit.SECONDS));
    zipPusher.push();
    log.info("Zipped %,d bytes to [%s]", size.get(), tmpPath.toUri());

    final URI indexOutURI = finalIndexZipFilePath.toUri();
    final DataSegment finalSegment = segmentTemplate.withLoadSpec(dataSegmentPusher.makeLoadSpec(indexOutURI))
            .withSize(size.get()).withBinaryVersion(SegmentUtils.getVersionFromDir(mergedBase));

    if (!renameIndexFiles(outputFS, tmpPath, finalIndexZipFilePath)) {
        throw new IOE("Unable to rename [%s] to [%s]", tmpPath.toUri().toString(),
                finalIndexZipFilePath.toUri().toString());
    }

    return finalSegment;
}

From source file: org.apache.druid.indexer.JobHelper.java

License: Apache License

public static void writeSegmentDescriptor(final FileSystem outputFS, final DataSegment segment,
        final Path descriptorPath, final Progressable progressable) throws IOException {
    final DataPusher descriptorPusher = (DataPusher) RetryProxy.create(DataPusher.class, new DataPusher() {
        @Override
        public long push() throws IOException {
            try {
                progressable.progress();
                if (outputFS.exists(descriptorPath)) {
                    if (!outputFS.delete(descriptorPath, false)) {
                        throw new IOE("Failed to delete descriptor at [%s]", descriptorPath);
                    }
                }
                try (final OutputStream descriptorOut = outputFS.create(descriptorPath, true,
                        DEFAULT_FS_BUFFER_SIZE, progressable)) {
                    HadoopDruidIndexerConfig.JSON_MAPPER.writeValue(descriptorOut, segment);
                }
            } catch (RuntimeException | IOException ex) {
                log.info(ex, "Exception in descriptor pusher retry loop");
                throw ex;
            }
            return -1;
        }
    }, RetryPolicies.exponentialBackoffRetry(NUM_RETRIES, SECONDS_BETWEEN_RETRIES, TimeUnit.SECONDS));
    descriptorPusher.push();
}