Example usage for com.google.common.collect Maps filterKeys

List of usage examples for com.google.common.collect Maps filterKeys

Introduction

On this page you can find example usage for com.google.common.collect Maps filterKeys.

Prototype

@CheckReturnValue
public static <K, V> BiMap<K, V> filterKeys(BiMap<K, V> unfiltered, final Predicate<? super K> keyPredicate) 

Document

Returns a bimap containing the mappings in unfiltered whose keys satisfy a predicate.
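
Note that filterKeys returns a live view of the unfiltered map rather than a copy, so changes to the backing map are reflected in the filtered map, and the BiMap overload shown above is only one of several (Guava provides analogous overloads for Map, SortedMap and NavigableMap). Below is a minimal, self-contained sketch of the plain Map overload; the class name FilterKeysSketch and the sample keys are purely illustrative.

import java.util.Map;

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;

public class FilterKeysSketch {
    public static void main(String[] args) {
        // Illustrative backing map.
        Map<String, Integer> unfiltered = ImmutableMap.of("host", 1, "port", 2, "user", 3);

        // Keep only the entries whose key is in the given set.
        Map<String, Integer> allowed = Maps.filterKeys(unfiltered,
                Predicates.in(ImmutableSet.of("host", "user")));
        System.out.println(allowed); // {host=1, user=3}

        // A lambda works too, since com.google.common.base.Predicate has a single
        // abstract method: drop keys that start with "p".
        Map<String, Integer> withoutPorts = Maps.filterKeys(unfiltered,
                key -> !key.startsWith("p"));
        System.out.println(withoutPorts); // {host=1, user=3}
    }
}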

Usage

From source file:com.isotrol.impe3.web20.impl.CommunitiesServiceImpl.java

private CommunityEntity fill(CommunityEntity entity, CommunityDTO dto) {
    final Calendar date = Calendar.getInstance();
    date.setTime(dto.getDate());
    entity.setDate(date);
    entity.setDescription(dto.getDescription());
    entity.setCode(dto.getCode());
    entity.setName(dto.getName());

    final Map<String, String> properties = entity.getProperties();
    properties.clear();
    final Map<String, String> dtopr = dto.getProperties();
    if (dtopr != null) {
        properties.putAll(Maps.filterKeys(Maps.filterValues(dtopr, notNull()), notNull()));
    }

    return entity;
}

From source file:com.palantir.atlasdb.transaction.impl.SerializableTransaction.java

private void verifyRows(Transaction ro) {
    for (String table : rowsRead.keySet()) {
        final ConcurrentNavigableMap<Cell, byte[]> readsForTable = getReadsForTable(table);
        Multimap<ColumnSelection, byte[]> map = Multimaps.newSortedSetMultimap(
                Maps.<ColumnSelection, Collection<byte[]>>newHashMap(), new Supplier<SortedSet<byte[]>>() {
                    @Override
                    public TreeSet<byte[]> get() {
                        return Sets.newTreeSet(UnsignedBytes.lexicographicalComparator());
                    }
                });
        for (RowRead r : rowsRead.get(table)) {
            map.putAll(r.cols, r.rows);
        }
        for (final ColumnSelection cols : map.keySet()) {
            for (List<byte[]> batch : Iterables.partition(map.get(cols), 1000)) {
                SortedMap<byte[], RowResult<byte[]>> currentRows = ro.getRows(table, batch, cols);
                for (byte[] row : batch) {
                    RowResult<byte[]> currentRow = currentRows.get(row);
                    Map<Cell, byte[]> orignalReads = readsForTable
                            .tailMap(Cells.createSmallestCellForRow(row), true)
                            .headMap(Cells.createLargestCellForRow(row), true);

                    // We want to filter out all our reads to just the set that matches our column selection.
                    orignalReads = Maps.filterKeys(orignalReads, new Predicate<Cell>() {
                        @Override
                        public boolean apply(Cell input) {
                            return cols.contains(input.getColumnName());
                        }
                    });

                    if (writesByTable.get(table) != null) {
                        // We don't want to verify any reads that we wrote to, because we will just read our own values.
                        // NB: We filter our write set out here because our normal SI checking handles this case to ensure the value hasn't changed.
                        orignalReads = Maps.filterKeys(orignalReads,
                                Predicates.not(Predicates.in(writesByTable.get(table).keySet())));
                    }

                    if (currentRow == null && orignalReads.isEmpty()) {
                        continue;
                    }

                    if (currentRow == null) {
                        throw TransactionSerializableConflictException.create(table, getTimestamp(),
                                System.currentTimeMillis() - timeCreated);
                    }

                    Map<Cell, byte[]> currentCells = Maps2.fromEntries(currentRow.getCells());
                    if (writesByTable.get(table) != null) {
                        // We don't want to verify any reads that we wrote to, because we will just read our own values.
                        // NB: We filter our write set out here because our normal SI checking handles this case to ensure the value hasn't changed.
                        currentCells = Maps.filterKeys(currentCells,
                                Predicates.not(Predicates.in(writesByTable.get(table).keySet())));
                    }
                    if (!areMapsEqual(orignalReads, currentCells)) {
                        throw TransactionSerializableConflictException.create(table, getTimestamp(),
                                System.currentTimeMillis() - timeCreated);
                    }
                }
            }
        }

    }
}

From source file:clocker.docker.location.DockerContainerLocation.java

@Override
public int copyFrom(final Map<String, ?> props, final String remote, final String local) {
    Map<String, ?> nonPortProps = Maps.filterKeys(props, Predicates.not(Predicates.containsPattern("port")));
    boolean entitySsh = Boolean.TRUE.equals(entity.config().get(DockerContainer.DOCKER_USE_SSH));
    boolean dockerSsh = Boolean.TRUE.equals(getOwner().config().get(DockerContainer.DOCKER_USE_SSH));
    if (entitySsh && dockerSsh) {
        return super.copyFrom(nonPortProps, remote, local);
    } else {
        String tmp = Os.mergePaths("/tmp",
                Joiner.on('-').join(dockerContainer.getId(), Urls.getBasename(local), Strings.makeRandomId(4)));
        String cp = String.format("cp %s:%s %s", dockerContainer.getContainerId(), remote, tmp);
        String output = getOwner().getDockerHost().runDockerCommand(cp);
        hostMachine.copyFrom(nonPortProps, tmp, local);
        LOG.info("Copying from {}:{} to {} - result: {}",
                new Object[] { dockerContainer.getContainerId(), remote, local, output });
        return 0;
    }
}

From source file:org.pentaho.di.trans.dataservice.jdbc.ThinConnection.java

private ThinConnection extractProperties(Map<String, String> arguments) {
    proxyHostname = arguments.get(ARG_PROXYHOSTNAME);
    proxyPort = arguments.get(ARG_PROXYPORT);
    nonProxyHosts = arguments.get(ARG_NONPROXYHOSTS);
    debugTransFilename = arguments.get(ARG_DEBUGTRANS);

    parameters = ImmutableMap.copyOf(Maps.filterKeys(arguments, new Predicate<String>() {
        @Override
        public boolean apply(String input) {
            return input.startsWith("PARAMETER_");
        }
    }));

    username = arguments.get("user");
    password = arguments.get("password");

    return this;
}

From source file:co.cask.cdap.internal.app.runtime.distributed.AbstractProgramTwillRunnable.java

/**
 * Creates program options. It contains program and user arguments as passed from the distributed program runner.
 * Extra program arguments are inserted based on the environment information (e.g. host, instance id). All
 * configs available through the TwillRunnable are also available through program arguments.
 */
private ProgramOptions createProgramOptions(CommandLine cmdLine, TwillContext context,
        Map<String, String> configs) {
    ProgramOptions original = GSON.fromJson(cmdLine.getOptionValue(RunnableOptions.PROGRAM_OPTIONS),
            ProgramOptions.class);

    // Overwrite them with environmental information
    Map<String, String> arguments = Maps.newHashMap(original.getArguments().asMap());
    arguments.put(ProgramOptionConstants.INSTANCE_ID, Integer.toString(context.getInstanceId()));
    arguments.put(ProgramOptionConstants.INSTANCES, Integer.toString(context.getInstanceCount()));
    arguments.put(ProgramOptionConstants.RUN_ID,
            original.getArguments().getOption(ProgramOptionConstants.RUN_ID));
    arguments.put(ProgramOptionConstants.TWILL_RUN_ID, context.getApplicationRunId().getId());
    arguments.put(ProgramOptionConstants.HOST, context.getHost().getCanonicalHostName());
    arguments
            .putAll(Maps.filterKeys(configs, Predicates.not(Predicates.in(ImmutableSet.of("hConf", "cConf")))));

    return new SimpleProgramOptions(context.getSpecification().getName(), new BasicArguments(arguments),
            original.getUserArguments(), original.isDebug());
}

From source file:org.opendaylight.netvirt.neutronvpn.ChangeUtils.java

public static <T extends DataObject> Map<InstanceIdentifier<T>, T> extractRemovedObjects(
        AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> changes, Class<T> klazz) {
    Set<InstanceIdentifier<T>> iids = extractRemoved(changes, klazz);
    return Maps.filterKeys(extractOriginal(changes, klazz), Predicates.in(iids));
}

From source file:edu.mit.streamjit.impl.compiler2.Compiler2BlobHost.java

/**
 * Extracts elements from storage and puts them in a DrainData for an
 * interpreter blob.
 * @param reads read instructions whose load() completed (thus requiring
 * unload())
 * @param drains drain instructions, if we're in the steady-state, or an
 * empty list if we didn't complete init
 */
private void doDrain(List<ReadInstruction> reads, List<DrainInstruction> drains) {
    Stopwatch drainTime = null;
    if (collectTimings)
        drainTime = Stopwatch.createStarted();

    List<Map<Token, Object[]>> data = new ArrayList<>(reads.size() + drains.size());
    for (ReadInstruction i : reads)
        data.add(i.unload());
    for (DrainInstruction i : drains)
        data.add(i.call());
    ImmutableMap<Token, List<Object>> mergedData = CollectionUtils.union((key, value) -> {
        int size = 0;
        for (Object[] v : value)
            size += v.length;
        List<Object> data1 = new ArrayList<>(size);
        for (Object[] v : value)
            data1.addAll(Arrays.asList(v));
        return data1;
    }, data);
    //Try once to write data on output edges, then let the interpreter handle it.
    Predicate<Token> isOutput = Predicates.in(getOutputs());
    for (Map.Entry<Token, List<Object>> e : Maps.filterKeys(mergedData, isOutput).entrySet()) {
        final Buffer b = buffers.get(e.getKey());
        final Object[] d = e.getValue().toArray();
        int written = b.write(d, 0, d.length);
        //Remove the data we wrote.
        e.getValue().subList(0, written).clear();
    }
    DrainData forInterp = new DrainData(mergedData,
            //We put state back in the workers via StateHolders, which are
            //DrainInstructions, so no state in the DrainData.  (It will be
            //in the DrainData produced by the interpreter blob, so
            //distributed will still see it.)
            ImmutableTable.<Integer, String, Object>of());

    Interpreter.InterpreterBlobFactory interpFactory = new Interpreter.InterpreterBlobFactory();
    Blob interp = interpFactory.makeBlob(workers, interpFactory.getDefaultConfiguration(workers), 1, forInterp);
    interp.installBuffers(buffers);
    Runnable interpCode = interp.getCoreCode(0);
    final AtomicBoolean interpFinished = new AtomicBoolean();
    interp.drain(() -> interpFinished.set(true));
    while (!interpFinished.get())
        interpCode.run();
    this.drainData = interp.getDrainData();

    SwitchPoint.invalidateAll(new SwitchPoint[] { sp1, sp2 });
    drainCallback.run();

    if (collectTimings) {
        drainTime.stop();
        System.out.println("total adjust time: " + adjustTime + " over " + adjustCount + " adjusts");
        System.out.println("drain time: " + drainTime);
    }
}

From source file:org.apache.aurora.scheduler.thrift.ReadOnlySchedulerImpl.java

@Override
public Response getJobUpdateDiff(JobUpdateRequest mutableRequest) {
    IJobUpdateRequest request;
    try {
        request = IJobUpdateRequest
                .build(new JobUpdateRequest(mutableRequest).setTaskConfig(configurationManager
                        .validateAndPopulate(ITaskConfig.build(mutableRequest.getTaskConfig())).newBuilder()));
    } catch (TaskDescriptionException e) {
        return error(INVALID_REQUEST, e);
    }

    IJobKey job = request.getTaskConfig().getJob();

    return storage.read(storeProvider -> {
        if (storeProvider.getCronJobStore().fetchJob(job).isPresent()) {
            return invalidRequest(NO_CRON);
        }

        JobDiff diff = JobDiff.compute(storeProvider.getTaskStore(), job,
                JobDiff.asMap(request.getTaskConfig(), request.getInstanceCount()),
                request.getSettings().getUpdateOnlyTheseInstances());

        Map<Integer, ITaskConfig> replaced = diff.getReplacedInstances();
        Map<Integer, ITaskConfig> replacements = Maps.asMap(diff.getReplacementInstances(),
                Functions.constant(request.getTaskConfig()));

        Map<Integer, ITaskConfig> add = Maps.filterKeys(replacements,
                Predicates.in(Sets.difference(replacements.keySet(), replaced.keySet())));
        Map<Integer, ITaskConfig> remove = Maps.filterKeys(replaced,
                Predicates.in(Sets.difference(replaced.keySet(), replacements.keySet())));
        Map<Integer, ITaskConfig> update = Maps.filterKeys(replaced,
                Predicates.in(Sets.intersection(replaced.keySet(), replacements.keySet())));

        return ok(
                Result.getJobUpdateDiffResult(new GetJobUpdateDiffResult().setAdd(instancesToConfigGroups(add))
                        .setRemove(instancesToConfigGroups(remove)).setUpdate(instancesToConfigGroups(update))
                        .setUnchanged(instancesToConfigGroups(diff.getUnchangedInstances()))));
    });
}

From source file:com.continuuity.weave.yarn.YarnWeavePreparer.java

private Closeable saveLocalFiles(Map<String, LocalResource> localResources, Set<String> keys)
        throws IOException {
    Map<String, LocalFile> localFiles = Maps.transformEntries(
            Maps.filterKeys(localResources, Predicates.in(keys)),
            new Maps.EntryTransformer<String, LocalResource, LocalFile>() {
                @Override
                public LocalFile transformEntry(String key, LocalResource value) {
                    try {
                        return new DefaultLocalFile(key,
                                ConverterUtils.getPathFromYarnURL(value.getResource()).toUri(),
                                value.getTimestamp(), value.getSize(),
                                value.getType() != LocalResourceType.FILE, value.getPattern());
                    } catch (URISyntaxException e) {
                        throw Throwables.propagate(e);
                    }
                }
            });

    LOG.debug("Create and copy localFiles.json");
    Location location = createTempLocation("localFiles", ".json");
    Writer writer = new OutputStreamWriter(location.getOutputStream(), Charsets.UTF_8);
    try {
        new GsonBuilder().registerTypeAdapter(LocalFile.class, new LocalFileCodec()).create()
                .toJson(localFiles.values(), new TypeToken<List<LocalFile>>() {
                }.getType(), writer);
    } finally {
        writer.close();
    }
    LOG.debug("Done localFiles.json");
    localResources.put("localFiles.json", YarnUtils.createLocalResource(location));
    return getCloseable(location);
}

From source file:org.janusgraph.graphdb.olap.computer.FulgoraGraphComputer.java

@Override
public Future<ComputerResult> submit() {
    if (executed)
        throw Exceptions.computerHasAlreadyBeenSubmittedAVertexProgram();
    else
        executed = true;

    // it is not possible to execute a computer if it has no vertex program or mapreducers
    if (null == vertexProgram && mapReduces.isEmpty())
        throw GraphComputer.Exceptions.computerHasNoVertexProgramNorMapReducers();
    // it is possible to run mapreducers without a vertex program
    if (null != vertexProgram) {
        GraphComputerHelper.validateProgramOnComputer(this, vertexProgram);
        this.mapReduces.addAll(this.vertexProgram.getMapReducers());
    }

    // if the user didn't set the desired persistence/result graph, get it from the vertex program; otherwise, no persistence
    this.persistMode = GraphComputerHelper.getPersistState(Optional.ofNullable(this.vertexProgram),
            Optional.ofNullable(this.persistMode));
    this.resultGraphMode = GraphComputerHelper.getResultGraphState(Optional.ofNullable(this.vertexProgram),
            Optional.ofNullable(this.resultGraphMode));
    // determine the legality of the persistence and result graph options
    if (!this.features().supportsResultGraphPersistCombination(this.resultGraphMode, this.persistMode))
        throw GraphComputer.Exceptions.resultGraphPersistCombinationNotSupported(this.resultGraphMode,
                this.persistMode);
    // ensure requested workers are not larger than supported workers
    if (this.numThreads > this.features().getMaxWorkers())
        throw GraphComputer.Exceptions.computerRequiresMoreWorkersThanSupported(this.numThreads,
                this.features().getMaxWorkers());

    memory = new FulgoraMemory(vertexProgram, mapReduces);

    return CompletableFuture.<ComputerResult>supplyAsync(() -> {
        final long time = System.currentTimeMillis();
        if (null != vertexProgram) {
            // ##### Execute vertex program
            vertexMemory = new FulgoraVertexMemory(expectedNumVertices, graph.getIDManager(), vertexProgram);
            // execute the vertex program
            vertexProgram.setup(memory);

            try (VertexProgramScanJob.Executor job = VertexProgramScanJob.getVertexProgramScanJob(graph, memory,
                    vertexMemory, vertexProgram)) {
                for (int iteration = 1;; iteration++) {
                    memory.completeSubRound();
                    vertexMemory.nextIteration(vertexProgram.getMessageScopes(memory));

                    jobId = name + "#" + iteration;
                    StandardScanner.Builder scanBuilder = graph.getBackend().buildEdgeScanJob();
                    scanBuilder.setJobId(jobId);
                    scanBuilder.setNumProcessingThreads(numThreads);
                    scanBuilder.setWorkBlockSize(readBatchSize);
                    scanBuilder.setJob(job);
                    PartitionedVertexProgramExecutor pvpe = new PartitionedVertexProgramExecutor(graph, memory,
                            vertexMemory, vertexProgram);
                    try {
                        //Iterates over all vertices and computes the vertex program on all non-partitioned vertices. For partitioned ones, the data is aggregated
                        ScanMetrics jobResult = scanBuilder.execute().get();
                        long failures = jobResult.get(ScanMetrics.Metric.FAILURE);
                        if (failures > 0) {
                            throw new JanusGraphException("Failed to process [" + failures
                                    + "] vertices in vertex program iteration [" + iteration
                                    + "]. Computer is aborting.");
                        }
                        //Runs the vertex program on all aggregated, partitioned vertices.
                        pvpe.run(numThreads, jobResult);
                        failures = jobResult
                                .getCustom(PartitionedVertexProgramExecutor.PARTITION_VERTEX_POSTFAIL);
                        if (failures > 0) {
                            throw new JanusGraphException("Failed to process [" + failures
                                    + "] partitioned vertices in vertex program iteration [" + iteration
                                    + "]. Computer is aborting.");
                        }
                    } catch (Exception e) {
                        throw new JanusGraphException(e);
                    }

                    vertexMemory.completeIteration();
                    memory.completeSubRound();
                    try {
                        if (this.vertexProgram.terminate(this.memory)) {
                            break;
                        }
                    } finally {
                        memory.incrIteration();
                    }
                }
            }
        }

        // ##### Execute mapreduce jobs
        // Collect map jobs
        Map<MapReduce, FulgoraMapEmitter> mapJobs = new HashMap<>(mapReduces.size());
        for (MapReduce mapReduce : mapReduces) {
            if (mapReduce.doStage(MapReduce.Stage.MAP)) {
                FulgoraMapEmitter mapEmitter = new FulgoraMapEmitter<>(
                        mapReduce.doStage(MapReduce.Stage.REDUCE));
                mapJobs.put(mapReduce, mapEmitter);
            }
        }
        // Execute map jobs
        jobId = name + "#map";
        try (VertexMapJob.Executor job = VertexMapJob.getVertexMapJob(graph, vertexMemory, mapJobs)) {
            StandardScanner.Builder scanBuilder = graph.getBackend().buildEdgeScanJob();
            scanBuilder.setJobId(jobId);
            scanBuilder.setNumProcessingThreads(numThreads);
            scanBuilder.setWorkBlockSize(readBatchSize);
            scanBuilder.setJob(job);
            try {
                ScanMetrics jobResult = scanBuilder.execute().get();
                long failures = jobResult.get(ScanMetrics.Metric.FAILURE);
                if (failures > 0) {
                    throw new JanusGraphException("Failed to process [" + failures
                            + "] vertices in map phase. Computer is aborting.");
                }
                failures = jobResult.getCustom(VertexMapJob.MAP_JOB_FAILURE);
                if (failures > 0) {
                    throw new JanusGraphException(
                            "Failed to process [" + failures + "] individual map jobs. Computer is aborting.");
                }
            } catch (Exception e) {
                throw new JanusGraphException(e);
            }
            // Execute reduce phase and add to memory
            for (Map.Entry<MapReduce, FulgoraMapEmitter> mapJob : mapJobs.entrySet()) {
                FulgoraMapEmitter<?, ?> mapEmitter = mapJob.getValue();
                MapReduce mapReduce = mapJob.getKey();
                mapEmitter.complete(mapReduce); // sort results if a map output sort is defined
                if (mapReduce.doStage(MapReduce.Stage.REDUCE)) {
                    final FulgoraReduceEmitter<?, ?> reduceEmitter = new FulgoraReduceEmitter<>();
                    try (WorkerPool workers = new WorkerPool(numThreads)) {
                        workers.submit(() -> mapReduce.workerStart(MapReduce.Stage.REDUCE));
                        for (final Map.Entry queueEntry : mapEmitter.reduceMap.entrySet()) {
                            if (null == queueEntry)
                                break;
                            workers.submit(() -> mapReduce.reduce(queueEntry.getKey(),
                                    ((Iterable) queueEntry.getValue()).iterator(), reduceEmitter));
                        }
                        workers.submit(() -> mapReduce.workerEnd(MapReduce.Stage.REDUCE));
                    } catch (Exception e) {
                        throw new JanusGraphException("Exception while executing reduce phase", e);
                    }
                    //                    mapEmitter.reduceMap.entrySet().parallelStream().forEach(entry -> mapReduce.reduce(entry.getKey(), entry.getValue().iterator(), reduceEmitter));

                    reduceEmitter.complete(mapReduce); // sort results if a reduce output sort is defined
                    mapReduce.addResultToMemory(this.memory, reduceEmitter.reduceQueue.iterator());
                } else {
                    mapReduce.addResultToMemory(this.memory, mapEmitter.mapQueue.iterator());
                }
            }
        }
        memory.attachReferenceElements(graph);

        // #### Write mutated properties back into graph
        Graph resultgraph = graph;
        if (persistMode == Persist.NOTHING && resultGraphMode == ResultGraph.NEW) {
            resultgraph = EmptyGraph.instance();
        } else if (persistMode != Persist.NOTHING && vertexProgram != null
                && !vertexProgram.getVertexComputeKeys().isEmpty()) {
            //First, create property keys in graph if they don't already exist
            JanusGraphManagement mgmt = graph.openManagement();
            try {
                for (VertexComputeKey key : vertexProgram.getVertexComputeKeys()) {
                    if (!mgmt.containsPropertyKey(key.getKey()))
                        log.warn(
                                "Property key [{}] is not part of the schema and will be created. It is advised to initialize all keys.",
                                key.getKey());
                    mgmt.getOrCreatePropertyKey(key.getKey());
                }
                mgmt.commit();
            } finally {
                if (mgmt != null && mgmt.isOpen())
                    mgmt.rollback();
            }

            //TODO: Filter based on VertexProgram
            Map<Long, Map<String, Object>> mutatedProperties = Maps.transformValues(
                    vertexMemory.getMutableVertexProperties(),
                    new Function<Map<String, Object>, Map<String, Object>>() {
                        @Nullable
                        @Override
                        public Map<String, Object> apply(@Nullable Map<String, Object> o) {
                            return Maps.filterKeys(o, s -> !VertexProgramHelper.isTransientVertexComputeKey(s,
                                    vertexProgram.getVertexComputeKeys()));
                        }
                    });

            if (resultGraphMode == ResultGraph.ORIGINAL) {
                AtomicInteger failures = new AtomicInteger(0);
                try (WorkerPool workers = new WorkerPool(numThreads)) {
                    List<Map.Entry<Long, Map<String, Object>>> subset = new ArrayList<>(
                            writeBatchSize / vertexProgram.getVertexComputeKeys().size());
                    int currentSize = 0;
                    for (Map.Entry<Long, Map<String, Object>> entry : mutatedProperties.entrySet()) {
                        subset.add(entry);
                        currentSize += entry.getValue().size();
                        if (currentSize >= writeBatchSize) {
                            workers.submit(new VertexPropertyWriter(subset, failures));
                            subset = new ArrayList<>(subset.size());
                            currentSize = 0;
                        }
                    }
                    if (!subset.isEmpty())
                        workers.submit(new VertexPropertyWriter(subset, failures));
                } catch (Exception e) {
                    throw new JanusGraphException("Exception while attempting to persist result into graph", e);
                }
                if (failures.get() > 0)
                    throw new JanusGraphException(
                            "Could not persist program results to graph. Check log for details.");
            } else if (resultGraphMode == ResultGraph.NEW) {
                resultgraph = graph.newTransaction();
                for (Map.Entry<Long, Map<String, Object>> vprop : mutatedProperties.entrySet()) {
                    Vertex v = resultgraph.vertices(vprop.getKey()).next();
                    for (Map.Entry<String, Object> prop : vprop.getValue().entrySet()) {
                        v.property(VertexProperty.Cardinality.single, prop.getKey(), prop.getValue());
                    }
                }
            }
        }
        // update runtime and return the newly computed graph
        this.memory.setRuntime(System.currentTimeMillis() - time);
        this.memory.complete();
        return new DefaultComputerResult(resultgraph, this.memory);
    });
}