Example usage for java.lang Thread interrupted

Introduction

This page collects usage examples for java.lang.Thread.interrupted().

Prototype

public static boolean interrupted() 

Document

Tests whether the current thread has been interrupted. The interrupted status of the thread is cleared by this method.
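
Most of the examples below follow the same pattern: poll Thread.interrupted() inside a long-running loop and translate a set flag into an InterruptedException for the caller. A minimal, self-contained sketch of that pattern (illustrative only, not taken from any of the source files below):

import java.util.Arrays;
import java.util.List;

public class InterruptionPollingSketch {

    // Polls the interrupt status between units of work. Thread.interrupted()
    // clears the status, so the interruption is converted into an
    // InterruptedException rather than left set on the thread.
    static void processAll(List<String> items) throws InterruptedException {
        for (String item : items) {
            if (Thread.interrupted()) {
                throw new InterruptedException("processing cancelled");
            }
            process(item);
        }
    }

    private static void process(String item) {
        System.out.println(item); // placeholder for real work
    }

    public static void main(String[] args) throws InterruptedException {
        processAll(Arrays.asList("a", "b", "c"));
    }
}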

Usage

From source file:org.marketcetera.util.except.ExceptUtilsTest.java

@Test
public void interruptionMessageThrow() {
    Thread.currentThread().interrupt();
    try {
        ExceptUtils.checkInterruption(TEST_MSG_1);
        fail();
    } catch (InterruptedException ex) {
        assertTrue(Thread.interrupted());
        assertEquals(TEST_MSG_1, ex.getMessage());
        assertNull(ex.getCause());
    }
}
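
The assertTrue(Thread.interrupted()) call above does double duty: it verifies that the interrupt status is still set when the exception arrives, and, because interrupted() clears the status as it reads it, it leaves the thread clean for subsequent tests. A standalone sketch (hypothetical, not part of ExceptUtilsTest) of that clear-on-read behavior:

public class InterruptedClearsStatusSketch {
    public static void main(String[] args) {
        Thread.currentThread().interrupt();

        // the first call observes the interrupt and clears the status
        System.out.println(Thread.interrupted()); // true
        // the second call sees a clean thread
        System.out.println(Thread.interrupted()); // false

        // isInterrupted(), by contrast, reads the status without clearing it
        Thread.currentThread().interrupt();
        System.out.println(Thread.currentThread().isInterrupted()); // true
        System.out.println(Thread.currentThread().isInterrupted()); // still true
    }
}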

From source file:at.alladin.rmbt.android.impl.TracerouteAndroidImpl.java

public List<HopDetail> call() throws Exception {
    isRunning.set(true);
    List<HopDetail> pingDetailList = new ArrayList<HopDetail>();
    final Runtime runtime = Runtime.getRuntime();

    for (int i = 1; i <= maxHops; i++) {
        if (Thread.interrupted() || !isRunning.get()) {
            throw new InterruptedException();
        }
        final long ts = System.nanoTime();
        final Process mIpAddrProcess = runtime.exec("/system/bin/ping -c 1 -t " + i + " -W2 " + host);
        final String proc = readFromProcess(mIpAddrProcess);
        final PingDetailImpl pingDetail = new PingDetailImpl(proc, System.nanoTime() - ts);
        pingDetailList.add(pingDetail);
        if (pingDetail.getReceived() > 0) {
            hasMaxHopsExceeded = false;
            break;
        }
    }

    return pingDetailList;
}

From source file:edu.uchc.octane.ParticleAnalysis.java

/**
 * Analyze the image.
 * @param ip The image to be analyzed
 * @param mask A rectangle defining the region of interest
 * @param threshold Lowest intensity to be analyzed
 * @param noise The noise threshold of the watershed algorithm
 * @throws InterruptedException
 */
public void process(ImageProcessor ip, Rectangle mask, int threshold, int noise) throws InterruptedException {

    int border = 1;

    if (g_ != null) {

        //         g_.setImageData(ip, isZeroBg_);

        border = g_.getWindowSize();
    }

    width_ = ip.getWidth();
    height_ = ip.getHeight();

    int[] offsets = { -width_, -width_ + 1, +1, +width_ + 1, +width_, +width_ - 1, -1, -width_ - 1 };

    Rectangle bbox = new Rectangle(border, border, width_ - 2 * border, height_ - 2 * border);
    bbox = bbox.intersection(mask);

    ArrayList<Pixel> pixels = new ArrayList<Pixel>();

    for (int y = bbox.y; y < bbox.y + bbox.height; y++) {
        for (int x = bbox.x; x < bbox.x + bbox.width; x++) {
            int v = ip.get(x, y);
            if (v > threshold) {
                pixels.add(new Pixel(x, y, v));
            }
        }
    }
    Collections.sort(pixels);

    nParticles_ = 0;
    x_ = new double[pixels.size()];
    y_ = new double[pixels.size()];
    z_ = new double[pixels.size()];
    h_ = new double[pixels.size()];
    e_ = new double[pixels.size()];

    FloodState floodState = new FloodState(width_, height_);
    floodState.floodBorders(bbox);

    int idxList, lenList;
    int[] listOfIndexes = new int[width_ * height_];

    for (Pixel p : pixels) {

        if (Thread.interrupted()) {
            throw (new InterruptedException());
        }

        int index = p.x + width_ * p.y;

        if (floodState.isProcessed(index)) {
            continue;
        }

        int v = p.value;
        boolean isMax = true;

        idxList = 0;
        lenList = 1;

        listOfIndexes[0] = index;

        floodState.flood(index);

        do {
            index = listOfIndexes[idxList];
            for (int d = 0; d < 8; d++) { // analyze all neighbors (in 8 directions) at the same level

                int index2 = index + offsets[d];

                if (floodState.isProcessed(index2)) { //conflict
                    isMax = false;
                    break;
                }

                if (!floodState.isFlooded(index2)) {
                    int v2 = ip.get(index2);
                    if (v2 >= v - noise) {
                        listOfIndexes[lenList++] = index2;
                        floodState.flood(index2);
                    }
                }
            }
        } while (++idxList < lenList);

        for (idxList = 0; idxList < lenList; idxList++) {
            floodState.process(listOfIndexes[idxList]);
        }

        if (isMax) {

            if (g_ != null) {

                g_.setInitialCoordinates(p.x, p.y);

                try {

                    double[] result = g_.fit();

                    if (result == null) {
                        continue;
                    }

                    double h = g_.getH();
                    if (h < noise || h < getHeightMin() || h > getHeightMax()) {
                        continue;
                    }

                    double e = g_.getE();
                    if (e < getFittingQualityMin()) {
                        continue;
                    }

                    x_[nParticles_] = g_.getX();
                    y_[nParticles_] = g_.getY();
                    z_[nParticles_] = g_.getZ();
                    h_[nParticles_] = h;
                    e_[nParticles_] = e;
                    nParticles_++;
                } catch (MathIllegalStateException e) {
                    //failed fitting
                    continue;
                }
            } else {

                x_[nParticles_] = (double) p.x;
                y_[nParticles_] = (double) p.y;
                h_[nParticles_] = (double) p.value;
                nParticles_++;

            }
        }
    }
}

From source file:com.aquatest.webservice.AquaTestWebService.java

/**
 * Converts an HTTP response object into a JSON object.
 *
 * @param response
 *            an HTTP response whose body is a JSON string
 * @return JSONObject created from the response body, or null if the reading thread was interrupted
 */
// method declared static for Android optimisation
public static JSONObject getJsonFromResponse(HttpResponse response)
        throws JSONException, ClientProtocolException, IOException {

    // read the response stream
    InputStream inputStream = response.getEntity().getContent();
    byte[] data = new byte[256];
    int len = 0;

    StringBuffer buffer = new StringBuffer();
    while (-1 != (len = inputStream.read(data))) {
        buffer.append(new String(data, 0, len));

        if (Thread.interrupted()) {
            return null;
        }
    }
    inputStream.close();

    // turn response string into a JSON object
    // TODO create better error handling
    JSONObject o = null;
    try {
        o = new JSONObject(buffer.toString());
    } catch (JSONException e) {
        // Log.v("WEBSERVICE", "Error creating JSON object from response");
        throw e;
    }

    return o;
}
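
Note that Thread.interrupted() clears the interrupt status here and the method then returns null without restoring it, so callers that check the flag afterwards will not see the interruption. A sketch of an alternative (hypothetical helper, not part of AquaTestWebService) that re-asserts the status and signals cancellation explicitly:

import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;

class InterruptAwareReaderSketch {

    // Drains the stream, but restores the interrupt status and throws
    // instead of silently returning null when the thread is interrupted.
    static String readFully(InputStream in) throws IOException {
        byte[] data = new byte[256];
        StringBuilder buffer = new StringBuilder();
        int len;
        while ((len = in.read(data)) != -1) {
            buffer.append(new String(data, 0, len));
            if (Thread.interrupted()) {
                Thread.currentThread().interrupt(); // re-assert the cleared status
                throw new InterruptedIOException("read cancelled");
            }
        }
        return buffer.toString();
    }
}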

From source file:com.vivareal.logger.appender.AsyncAppender.java

/**
 * {@inheritDoc}
 */
public void append(final LoggingEvent event) {
    // if dispatcher thread has died then setup it again to remain async
    // See bug 23021
    if ((dispatcher == null) || !dispatcher.isAlive() || (bufferSize <= 0)) {
        setupDispatcher();
    }

    event.getNDC();
    event.getThreadName();
    event.getMDCCopy();
    if (StringUtils.isNotBlank(application)) {
        MDC.put("application", application);
    }
    if (StringUtils.isNotBlank(environment)) {
        MDC.put("environment", environment);
    }
    if (locationInfo) {
        event.getLocationInformation();
    }
    event.getRenderedMessage();
    event.getThrowableStrRep();

    synchronized (buffer) {
        while (true) {
            int previousSize = buffer.size();

            if (previousSize < bufferSize) {
                buffer.add(event);

                if (previousSize == 0) {
                    buffer.notifyAll();
                }

                break;
            }

            boolean discard = true;
            if (blocking && !Thread.interrupted() && Thread.currentThread() != dispatcher) {
                try {
                    buffer.wait();
                    discard = false;
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }

            if (discard) {
                String loggerName = event.getLoggerName();
                DiscardSummary summary = (DiscardSummary) discardMap.get(loggerName);

                if (summary == null) {
                    summary = new DiscardSummary(event);
                    discardMap.put(loggerName, summary);
                } else {
                    summary.add(event);
                }

                break;
            }
        }
    }
}

From source file:org.apache.gobblin.cluster.StreamingJobConfigurationManager.java

private void fetchJobSpecs() throws ExecutionException, InterruptedException {
    List<Pair<SpecExecutor.Verb, Spec>> changesSpecs = (List<Pair<SpecExecutor.Verb, Spec>>) this.specConsumer
            .changedSpecs().get();

    // propagate thread interruption so that caller will exit from loop
    if (Thread.interrupted()) {
        throw new InterruptedException();
    }

    for (Pair<SpecExecutor.Verb, Spec> entry : changesSpecs) {
        SpecExecutor.Verb verb = entry.getKey();
        if (verb.equals(SpecExecutor.Verb.ADD)) {
            // Handle addition
            JobSpec jobSpec = (JobSpec) entry.getValue();
            postNewJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
        } else if (verb.equals(SpecExecutor.Verb.UPDATE)) {
            // Handle update
            JobSpec jobSpec = (JobSpec) entry.getValue();
            postUpdateJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
        } else if (verb.equals(SpecExecutor.Verb.DELETE)) {
            // Handle delete
            Spec anonymousSpec = (Spec) entry.getValue();
            postDeleteJobConfigArrival(anonymousSpec.getUri().toString(), new Properties());
        }
    }
}

From source file:ch.epfl.scapetoad.CartogramGastner.java

/**
 * Starts the cartogram computation using the given grid size.
 * @param gridSize the size of the grid used for computation. The
 *        grid size must be a power of 2.
 */
public void compute(int gridSize) throws InterruptedException {

    // Store the grid size in the lx and ly attributes.
    lx = gridSize;
    ly = gridSize;

    this.initializeArrays();
    this.computeInitialDensity();
    FFT.coscosft(rho_0, 1, 1);

    boolean hasConverged = false;
    while (hasConverged == false) {
        if (Thread.interrupted()) {
            // Raise an InterruptedException.
            throw new InterruptedException("Computation has been interrupted by the user.");
        }

        hasConverged = this.integrateNonlinearVolterraEquation();

    }

    this.projectCartogramGrid();

}

From source file:org.apache.tinkerpop.gremlin.server.op.AbstractOpProcessor.java

/**
 * Provides a generic way of iterating a result set back to the client. Implementers should respect the
 * {@link Settings#serializedResponseTimeout} configuration and break the serialization process if
 * it begins to take too long to do so, throwing a {@link java.util.concurrent.TimeoutException} in such
 * cases.
 *
 * @param context The Gremlin Server {@link Context} object containing settings, request message, etc.
 * @param itty The result to iterate
 * @throws TimeoutException if the time taken to serialize the entire result set exceeds the allowable time.
 */
protected void handleIterator(final Context context, final Iterator itty)
        throws TimeoutException, InterruptedException {
    final ChannelHandlerContext ctx = context.getChannelHandlerContext();
    final RequestMessage msg = context.getRequestMessage();
    final Settings settings = context.getSettings();
    final MessageSerializer serializer = ctx.channel().attr(StateKey.SERIALIZER).get();
    final boolean useBinary = ctx.channel().attr(StateKey.USE_BINARY).get();
    boolean warnOnce = false;

    // sessionless requests are always transaction managed, but in-session requests are configurable.
    final boolean managedTransactionsForRequest = manageTransactions ? true
            : (Boolean) msg.getArgs().getOrDefault(Tokens.ARGS_MANAGE_TRANSACTION, false);

    // we have an empty iterator - happens on stuff like: g.V().iterate()
    if (!itty.hasNext()) {
        // as there is nothing left to iterate if we are transaction managed then we should execute a
        // commit here before we send back a NO_CONTENT which implies success
        if (managedTransactionsForRequest)
            attemptCommit(msg, context.getGraphManager(), settings.strictTransactionManagement);
        ctx.writeAndFlush(ResponseMessage.build(msg).code(ResponseStatusCode.NO_CONTENT).create());
        return;
    }

    // timer for the total serialization time
    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    // the batch size can be overridden by the request
    final int resultIterationBatchSize = (Integer) msg.optionalArgs(Tokens.ARGS_BATCH_SIZE)
            .orElse(settings.resultIterationBatchSize);
    List<Object> aggregate = new ArrayList<>(resultIterationBatchSize);

    // use an external control to manage the loop as opposed to just checking hasNext() in the while.  this
    // prevents situations where auto transactions create a new transaction after calls to commit() within
    // the loop on calls to hasNext().
    boolean hasMore = itty.hasNext();

    while (hasMore) {
        if (Thread.interrupted())
            throw new InterruptedException();

        // check if an implementation needs to force flush the aggregated results before the iteration batch
        // size is reached.
        final boolean forceFlush = isForceFlushed(ctx, msg, itty);

        // have to check the aggregate size because it is possible that the channel is not writeable (below)
        // so iterating next() if the message is not written and flushed would bump the aggregate size beyond
        // the expected resultIterationBatchSize.  Total serialization time for the response remains in
        // effect so if the client is "slow" it may simply timeout.
        //
        // there is a need to check hasNext() on the iterator because if the channel is not writeable the
        // previous pass through the while loop will have next()'d the iterator and if it is "done" then a
        // NoSuchElementException will raise its head. also need a check to ensure that this iteration doesn't
        // require a forced flush which can be forced by sub-classes.
        //
        // this could be placed inside the isWriteable() portion of the if-then below but it seems better to
        // allow iteration to continue into a batch if that is possible rather than just doing nothing at all
        // while waiting for the client to catch up
        if (aggregate.size() < resultIterationBatchSize && itty.hasNext() && !forceFlush)
            aggregate.add(itty.next());

        // send back a page of results if batch size is met or if it's the end of the results being iterated.
        // also check writeability of the channel to prevent OOME for slow clients.
        if (ctx.channel().isWritable()) {
            if (forceFlush || aggregate.size() == resultIterationBatchSize || !itty.hasNext()) {
                final ResponseStatusCode code = itty.hasNext() ? ResponseStatusCode.PARTIAL_CONTENT
                        : ResponseStatusCode.SUCCESS;

                // serialize here because in sessionless requests the serialization must occur in the same
                // thread as the eval.  as eval occurs in the GremlinExecutor there's no way to get back to the
                // thread that processed the eval of the script, so we have to push serialization down into that
                Frame frame = null;
                try {
                    frame = makeFrame(ctx, msg, serializer, useBinary, aggregate, code,
                            generateMetaData(ctx, msg, code, itty));
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();

                    // exception is handled in makeFrame() - serialization error gets written back to driver
                    // at that point
                    if (managedTransactionsForRequest)
                        attemptRollback(msg, context.getGraphManager(), settings.strictTransactionManagement);
                    break;
                }

                // track whether there is anything left in the iterator because it needs to be accessed after
                // the transaction could be closed - in that case a call to hasNext() could open a new transaction
                // unintentionally
                final boolean moreInIterator = itty.hasNext();

                try {
                    // only need to reset the aggregation list if there's more stuff to write
                    if (moreInIterator)
                        aggregate = new ArrayList<>(resultIterationBatchSize);
                    else {
                        // iteration and serialization are both complete which means this finished successfully. note that
                        // errors internal to script eval or timeout will roll back given GremlinServer's global configurations.
                        // local errors will get rolled back below because the exceptions aren't thrown in those cases to be
                        // caught by the GremlinExecutor for global rollback logic. this only needs to be committed if
                        // there are no more items to iterate and serialization is complete
                        if (managedTransactionsForRequest)
                            attemptCommit(msg, context.getGraphManager(), settings.strictTransactionManagement);

                        // exit the result iteration loop as there are no more results left.  using this external control
                        // because of the above commit.  some graphs may open a new transaction on the call to
                        // hasNext()
                        hasMore = false;
                    }
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();
                    throw ex;
                }

                if (!moreInIterator)
                    iterateComplete(ctx, msg, itty);

                // the flush is called after the commit has potentially occurred.  in this way, if a commit was
                // required then it will be 100% complete before the client receives it. the "frame" at this point
                // should have completely detached objects from the transaction (i.e. serialization has occurred)
                // so a new one should not be opened on the flush down the netty pipeline
                ctx.writeAndFlush(frame);
            }
        } else {
            // don't keep triggering this warning over and over again for the same request
            if (!warnOnce) {
                logger.warn(
                        "Pausing response writing as writeBufferHighWaterMark exceeded on {} - writing will continue once client has caught up",
                        msg);
                warnOnce = true;
            }

            // since the client is lagging we can hold here for a period of time for the client to catch up.
            // this isn't blocking the IO thread - just a worker.
            TimeUnit.MILLISECONDS.sleep(10);
        }

        stopWatch.split();
        if (settings.serializedResponseTimeout > 0
                && stopWatch.getSplitTime() > settings.serializedResponseTimeout) {
            final String timeoutMsg = String.format(
                    "Serialization of the entire response exceeded the 'serializedResponseTimeout' setting %s",
                    warnOnce ? "[Gremlin Server paused writes to client as messages were not being consumed quickly enough]"
                            : "");
            throw new TimeoutException(timeoutMsg.trim());
        }

        stopWatch.unsplit();
    }

    stopWatch.stop();
}

From source file:org.executequery.gui.resultset.ResultSetTableModel.java

public void createTable(ResultSet resultSet) {

    if (!isOpenAndValid(resultSet)) {

        clearData();
        return;
    }

    try {

        resetMetaData();
        ResultSetMetaData rsmd = resultSet.getMetaData();

        columnHeaders.clear();
        visibleColumnHeaders.clear();
        tableData.clear();

        int zeroBaseIndex = 0;
        int count = rsmd.getColumnCount();
        for (int i = 1; i <= count; i++) {

            zeroBaseIndex = i - 1;

            columnHeaders.add(new ResultSetColumnHeader(zeroBaseIndex, rsmd.getColumnLabel(i),
                    rsmd.getColumnName(i), rsmd.getColumnType(i), rsmd.getColumnTypeName(i)));
        }

        int recordCount = 0;
        interrupted = false;

        if (holdMetaData) {

            setMetaDataVectors(rsmd);
        }

        List<RecordDataItem> rowData;
        long time = System.currentTimeMillis();
        while (resultSet.next()) {

            if (interrupted || Thread.interrupted()) {

                throw new InterruptedException();
            }

            recordCount++;
            rowData = new ArrayList<RecordDataItem>(count);

            for (int i = 1; i <= count; i++) {

                zeroBaseIndex = i - 1;

                ResultSetColumnHeader header = columnHeaders.get(zeroBaseIndex);
                RecordDataItem value = recordDataItemFactory.create(header);

                try {

                    int dataType = header.getDataType();
                    switch (dataType) {

                    // some drivers (informix for example)
                    // were noticed to return the hashcode from
                    // getObject for -1 data types (e.g. longvarchar).
                    // force string for these - others stick with
                    // getObject() for default value formatting

                    case Types.CHAR:
                    case Types.VARCHAR:
                        value.setValue(resultSet.getString(i));
                        break;
                    case Types.DATE:
                        value.setValue(resultSet.getDate(i));
                        break;
                    case Types.TIME:
                        value.setValue(resultSet.getTime(i));
                        break;
                    case Types.TIMESTAMP:
                        value.setValue(resultSet.getTimestamp(i));
                        break;
                    case Types.LONGVARCHAR:
                    case Types.CLOB:
                        value.setValue(resultSet.getClob(i));
                        break;
                    case Types.LONGVARBINARY:
                    case Types.VARBINARY:
                    case Types.BINARY:
                        value.setValue(resultSet.getBytes(i));
                        break;
                    case Types.BLOB:
                        value.setValue(resultSet.getBlob(i));
                        break;
                    case Types.BIT:
                    case Types.TINYINT:
                    case Types.SMALLINT:
                    case Types.INTEGER:
                    case Types.BIGINT:
                    case Types.FLOAT:
                    case Types.REAL:
                    case Types.DOUBLE:
                    case Types.NUMERIC:
                    case Types.DECIMAL:
                    case Types.NULL:
                    case Types.OTHER:
                    case Types.JAVA_OBJECT:
                    case Types.DISTINCT:
                    case Types.STRUCT:
                    case Types.ARRAY:
                    case Types.REF:
                    case Types.DATALINK:
                    case Types.BOOLEAN:
                    case Types.ROWID:
                    case Types.NCHAR:
                    case Types.NVARCHAR:
                    case Types.LONGNVARCHAR:
                    case Types.NCLOB:
                    case Types.SQLXML:

                        // use getObject for all other known types

                        value.setValue(resultSet.getObject(i));
                        break;

                    default:

                        // otherwise try as string

                        asStringOrObject(value, resultSet, i);
                        break;
                    }

                } catch (Exception e) {

                    try {

                        // ... and on dump, resort to string
                        value.setValue(resultSet.getString(i));

                    } catch (SQLException sqlException) {

                        // catch-all SQLException - yes, this is hideous

                        // noticed with invalid date formatted values in mysql

                        value.setValue("<Error - " + sqlException.getMessage() + ">");
                    }
                }

                if (resultSet.wasNull()) {

                    value.setNull();
                }

                rowData.add(value);
            }

            tableData.add(rowData);

            if (recordCount == maxRecords) {

                break;
            }

        }

        if (Log.isTraceEnabled()) {

            Log.trace("Finished populating table model - " + recordCount + " rows - [ "
                    + MiscUtils.formatDuration(System.currentTimeMillis() - time) + "]");
        }

        fireTableStructureChanged();

    } catch (SQLException e) {

        System.err.println("SQL error populating table model at: " + e.getMessage());
        Log.debug("Table model error - " + e.getMessage(), e);

    } catch (Exception e) {

        if (e instanceof InterruptedException) {

            Log.debug("ResultSet generation interrupted.", e);

        } else {

            String message = e.getMessage();
            if (StringUtils.isBlank(message)) {

                System.err.println("Exception populating table model.");

            } else {

                System.err.println("Exception populating table model at: " + message);
            }

            Log.debug("Table model error - ", e);
        }

    } finally {

        if (resultSet != null) {

            try {

                resultSet.close();

                Statement statement = resultSet.getStatement();
                if (statement != null) {

                    statement.close();
                }

            } catch (SQLException e) {
            }

        }
    }

}

From source file:net.iponweb.hadoop.streaming.parquet.ParquetAsTextOutputFormat.java

public RecordWriter<Text, Text> getRecordWriter(FileSystem fs, JobConf job, String name, Progressable progress)
        throws IOException {

    // find and load schema

    String writeSchema = job.get("iow.streaming.output.schema");
    MessageType s;

    if (writeSchema == null) {

        String schemaFile = job.get("iow.streaming.output.schema.file", "streaming_output_schema");

        if (job.getBoolean("iow.streaming.schema.use.prefix", false)) {
            // guess schema from file name
            // format is: schema:filename
            // with special keyword default - 'default:filename'

            String str[] = name.split(":");
            if (!str[0].equals("default"))
                schemaFile = str[0];

            name = str[1];
        }

        LOG.info("Using schema: " + schemaFile);
        File f = new File(schemaFile);
        try {
            BufferedReader reader = new BufferedReader(new FileReader(f));
            StringBuilder r = new StringBuilder();
            String line;
            while ((line = reader.readLine()) != null)
                r.append(line);

            writeSchema = r.toString();

        } catch (Throwable e) {
            LOG.error("Can't read schema file " + schemaFile);
            Throwables.propagateIfPossible(e, IOException.class);
            throw new RuntimeException(e);
        }
    }
    s = MessageTypeParser.parseMessageType(writeSchema);

    setWriteSupportClass(job, GroupWriteSupport.class);
    GroupWriteSupport.setSchema(s, job);

    CompressionCodecName codec = getCodec(job);
    String extension = codec.getExtension() + ".parquet";
    Path file = getDefaultWorkFile(job, name, extension);

    ParquetRecordWriter<SimpleGroup> realWriter;
    try {
        realWriter = (ParquetRecordWriter<SimpleGroup>) realOutputFormat.getRecordWriter(job, file, codec);
    } catch (InterruptedException e) {
        Thread.interrupted();
        throw new IOException(e);
    }

    return createRecordWriter(realWriter, fs, job, name, progress);
}
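
In the catch block above, Thread.interrupted() clears the interrupt status before the InterruptedException is wrapped in an IOException. The more common convention is to restore the status with Thread.currentThread().interrupt() so that code further up the stack can still observe the interruption. A minimal sketch of that idiom (hypothetical helper, not from ParquetAsTextOutputFormat):

import java.io.IOException;
import java.util.concurrent.Callable;

class InterruptPreservingWrapperSketch {

    // Wraps a checked InterruptedException in an IOException while keeping
    // the thread's interrupt status visible to callers.
    static <T> T callInterruptibly(Callable<T> task) throws IOException {
        try {
            return task.call();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore rather than clear
            throw new IOException(e);
        } catch (Exception e) {
            throw new IOException(e);
        }
    }
}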