List of usage examples for java.util.concurrent.atomic.AtomicLong
public AtomicLong(long initialValue)
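The constructor seeds the counter with an initial value; every later update is atomic. As a minimal warm-up before the real-world excerpts below (method outputs noted in comments):

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongBasics {
    public static void main(String[] args) {
        AtomicLong counter = new AtomicLong(10L); // initialValue = 10
        System.out.println(counter.incrementAndGet());      // 11
        System.out.println(counter.addAndGet(5L));          // 16
        System.out.println(counter.getAndSet(0L));          // prints 16, then holds 0
        System.out.println(counter.compareAndSet(0L, 42L)); // true, now 42
    }
}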
From source file:com.facebook.LinkBench.LinkBenchDriverInj.java
/**
 * Start all runnables at the same time. Then block till all
 * tasks are completed. Returns the elapsed time (in millisec)
 * since the start of the first task to the completion of all tasks.
 */
static long concurrentExec(final List<? extends Runnable> tasks, boolean runReq, Random rng) throws Throwable {
    final CountDownLatch startSignal = new CountDownLatch(tasks.size());
    final CountDownLatch doneSignal = new CountDownLatch(tasks.size());
    final AtomicLong startTime = new AtomicLong(0);
    for (final Runnable task : tasks) {
        new Thread(new Runnable() {
            @Override
            public void run() {
                /*
                 * Run a task. If an uncaught exception occurs, bail
                 * out of the benchmark immediately, since any results
                 * of the benchmark will no longer be valid anyway
                 */
                try {
                    startSignal.countDown();
                    startSignal.await();
                    long now = System.currentTimeMillis();
                    startTime.compareAndSet(0, now);
                    task.run();
                } catch (Throwable e) {
                    Logger threadLog = Logger.getLogger(ConfigUtil.LINKBENCH_LOGGER);
                    threadLog.error("Unrecoverable exception in worker thread:", e);
                    Runtime.getRuntime().halt(1);
                }
                doneSignal.countDown();
            }
        }).start();
    }

    if (runReq) {
        /* Do logic with injection rate. All tasks above should be waiting on tasks */
        long reqTime_ns = System.nanoTime();
        double requestrate_ns = ((double) requestrate) / 1e9;
        long numRequests = ConfigUtil.getLong(props, Config.NUM_REQUESTS);
        System.out.println("Processing Requests:" + genQueue);
        try {
            long runStartTime = System.currentTimeMillis();
            long curTime = runStartTime;
            for (int i = 0; i < numRequests; i++) {
                reqTime_ns = Timer.waitExpInterval(rng, reqTime_ns, requestrate_ns);
                genQueue.put(System.nanoTime());
                curTime = System.currentTimeMillis();
                if (curTime > runStartTime + maxTime * 1000) {
                    System.out.println("Time limit elapsed");
                    break;
                }
            }
            // Send stop signal to all requesters
            for (int i = 0; i < nrequesters; i++) {
                genQueue.put((long) 0);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    doneSignal.await(); // wait for all threads to finish
    long endTime = System.currentTimeMillis();
    return endTime - startTime.get();
}
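The `startTime.compareAndSet(0, now)` call above is a first-writer-wins idiom: every worker attempts the CAS, but only the first one succeeds, so the field ends up recording the earliest start time with no locking. A minimal self-contained sketch of the same idiom (class and method names are illustrative, not from LinkBench):

import java.util.concurrent.atomic.AtomicLong;

public class FirstEventTimestamp {
    // 0 acts as the "unset" sentinel; only the first thread to reach the
    // CAS succeeds, so the holder keeps the earliest observed timestamp.
    private static final AtomicLong startTime = new AtomicLong(0);

    static void recordStart() {
        startTime.compareAndSet(0, System.currentTimeMillis());
    }

    public static void main(String[] args) throws InterruptedException {
        Runnable worker = FirstEventTimestamp::recordStart;
        Thread t1 = new Thread(worker), t2 = new Thread(worker);
        t1.start(); t2.start();
        t1.join(); t2.join();
        System.out.println("first start at " + startTime.get());
    }
}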
From source file:com.dssmp.agent.tailing.FileTailer.java
private void emitStatus() {
    try {
        Map<String, Object> metrics = getMetrics();
        if (flow.logEmitInternalMetrics()) {
            try {
                ObjectMapper mapper = new ObjectMapper();
                LOGGER.info("{}: File Tailer Status: {}", serviceName(), mapper.writeValueAsString(metrics));
            } catch (JsonProcessingException e) {
                LOGGER.error("{}: Failed when emitting file tailer status metrics.", serviceName(), e);
            }
        }
        AtomicLong zero = new AtomicLong(0);
        long bytesBehind = Metrics.getMetric(metrics, Metrics.FILE_TAILER_BYTES_BEHIND_METRIC, 0L);
        int filesBehind = Metrics.getMetric(metrics, Metrics.FILE_TAILER_FILES_BEHIND_METRIC, 0);
        long bytesConsumed = Metrics.getMetric(metrics, Metrics.PARSER_TOTAL_BYTES_CONSUMED_METRIC, zero).get();
        long recordsParsed = Metrics.getMetric(metrics, Metrics.PARSER_TOTAL_RECORDS_PARSED_METRIC, zero).get();
        long recordsSent = Metrics.getMetric(metrics, Metrics.SENDER_TOTAL_RECORDS_SENT_METRIC, zero).get();
        LOGGER.info(
                "{}: Tailer Progress: Tailer has parsed {} records ({} bytes), and has successfully sent {} records to destination.",
                serviceName(), recordsParsed, bytesConsumed, recordsSent);
        String msg = String.format("%s: Tailer is %02f MB (%d bytes) behind.", serviceName(),
                bytesBehind / 1024 / 1024.0, bytesBehind);
        if (filesBehind > 0) {
            msg += String.format(" There are %d file(s) newer than current file(s) being tailed.", filesBehind);
        }
        if (bytesBehind >= Metrics.BYTES_BEHIND_WARN_LEVEL) {
            LOGGER.warn(msg);
        } else if (bytesBehind >= Metrics.BYTES_BEHIND_INFO_LEVEL || agentContext.logEmitInternalMetrics()) {
            LOGGER.info(msg);
        } else if (bytesBehind > 0) {
            LOGGER.debug(msg);
        }
    } catch (Exception e) {
        LOGGER.error("{}: Failed while emitting tailer status.", serviceName(), e);
    }
}
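Here a single `AtomicLong zero` is passed as the fallback for several metric lookups, so a missing counter reads as 0 without null checks. A generic sketch of the pattern, with a hypothetical getCounter helper standing in for Metrics.getMetric:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

public class MetricDefaults {
    // Hypothetical lookup: returns the stored counter, or the supplied
    // fallback when the key is absent.
    static AtomicLong getCounter(Map<String, AtomicLong> metrics, String key, AtomicLong fallback) {
        AtomicLong value = metrics.get(key);
        return value != null ? value : fallback;
    }

    public static void main(String[] args) {
        Map<String, AtomicLong> metrics = new HashMap<>();
        metrics.put("recordsParsed", new AtomicLong(42));

        AtomicLong zero = new AtomicLong(0); // one shared "missing" default
        long parsed = getCounter(metrics, "recordsParsed", zero).get(); // 42
        long sent = getCounter(metrics, "recordsSent", zero).get();     // 0
        System.out.println(parsed + " parsed, " + sent + " sent");
    }
}

Since AtomicLong is mutable, the shared default only stays safe as long as callers never increment it; they must read it with get() as the tailer does.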
From source file:org.apache.nifi.controller.repository.FileSystemRepository.java
private synchronized void initializeRepository() throws IOException {
    final Map<String, Path> realPathMap = new HashMap<>();
    final ExecutorService executor = Executors.newFixedThreadPool(containers.size());
    final List<Future<Long>> futures = new ArrayList<>();

    // Run through each of the containers. For each container, create the sections if necessary.
    // Then, we need to scan through the archived data so that we can determine what the oldest
    // archived data is, so that we know when we have to start aging data off.
    for (final Map.Entry<String, Path> container : containers.entrySet()) {
        final String containerName = container.getKey();
        final ContainerState containerState = containerStateMap.get(containerName);
        final Path containerPath = container.getValue();
        final boolean pathExists = Files.exists(containerPath);

        final Path realPath;
        if (pathExists) {
            realPath = containerPath.toRealPath();
        } else {
            realPath = Files.createDirectories(containerPath).toRealPath();
        }

        for (int i = 0; i < SECTIONS_PER_CONTAINER; i++) {
            Files.createDirectories(realPath.resolve(String.valueOf(i)));
        }

        realPathMap.put(containerName, realPath);

        // We need to scan the archive directories to find out the oldest timestamp so that we know whether or not
        // we will have to delete archived data based on time threshold. Scanning all of the directories can be very
        // expensive because of all of the disk accesses. So we do this in multiple threads. Since containers are
        // often unique to a disk, we just map 1 thread to each container.
        final Callable<Long> scanContainer = new Callable<Long>() {
            @Override
            public Long call() throws IOException {
                final AtomicLong oldestDateHolder = new AtomicLong(0L);

                // the path already exists, so scan the path to find any files and update maxIndex to the max of
                // all filenames seen.
                Files.walkFileTree(realPath, new SimpleFileVisitor<Path>() {
                    @Override
                    public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
                        LOG.warn("Content repository contains un-readable file or directory '"
                                + file.getFileName() + "'. Skipping. ", exc);
                        return FileVisitResult.SKIP_SUBTREE;
                    }

                    @Override
                    public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs)
                            throws IOException {
                        if (attrs.isDirectory()) {
                            return FileVisitResult.CONTINUE;
                        }

                        // Check if this is an 'archive' directory
                        final Path relativePath = realPath.relativize(file);
                        if (relativePath.getNameCount() > 3
                                && ARCHIVE_DIR_NAME.equals(relativePath.subpath(1, 2).toString())) {
                            final long lastModifiedTime = getLastModTime(file);
                            if (lastModifiedTime < oldestDateHolder.get()) {
                                oldestDateHolder.set(lastModifiedTime);
                            }
                            containerState.incrementArchiveCount();
                        }
                        return FileVisitResult.CONTINUE;
                    }
                });
                return oldestDateHolder.get();
            }
        };

        // If the path didn't exist to begin with, there's no archive directory, so don't bother scanning.
        if (pathExists) {
            futures.add(executor.submit(scanContainer));
        }
    }

    executor.shutdown();
    for (final Future<Long> future : futures) {
        try {
            final Long oldestDate = future.get();
            if (oldestDate < oldestArchiveDate.get()) {
                oldestArchiveDate.set(oldestDate);
            }
        } catch (final ExecutionException | InterruptedException e) {
            if (e.getCause() instanceof IOException) {
                throw (IOException) e.getCause();
            } else {
                throw new RuntimeException(e);
            }
        }
    }

    containers.clear();
    containers.putAll(realPathMap);
}
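The `oldestDateHolder` above exists because a local variable captured by an anonymous FileVisitor must be (effectively) final, so AtomicLong serves as a mutable holder for the running minimum. A condensed sketch of that idiom; note it seeds the holder with Long.MAX_VALUE so the first comparison always updates it, and returns Long.MAX_VALUE if no files are found:

import java.io.IOException;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.concurrent.atomic.AtomicLong;

public class OldestFileFinder {
    // Locals captured by an inner class must be effectively final, so an
    // AtomicLong acts as a mutable holder for the running minimum.
    static long oldestModTime(Path root) throws IOException {
        final AtomicLong oldest = new AtomicLong(Long.MAX_VALUE);
        Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
                long modTime = attrs.lastModifiedTime().toMillis();
                if (modTime < oldest.get()) {
                    oldest.set(modTime); // single walker thread, so get/set is safe
                }
                return FileVisitResult.CONTINUE;
            }
        });
        return oldest.get();
    }

    public static void main(String[] args) throws IOException {
        System.out.println(oldestModTime(Paths.get(".")));
    }
}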
From source file:com.milaboratory.core.alignment.KAlignerTest.java
@Test
public void testRandomCorrectnessConcurrent() throws Exception {
    KAlignerParameters p = gParams.clone().setMapperKValue(6).setAlignmentStopPenalty(Integer.MIN_VALUE)
            .setMapperAbsoluteMinScore(2.1f).setMapperMinSeedsDistance(4);
    p.setScoring(new LinearGapAlignmentScoring(NucleotideSequence.ALPHABET,
            ScoringUtils.getSymmetricMatrix(4, -4, 4), -5)).setMaxAdjacentIndels(2);

    KAlignerParameters[] params = new KAlignerParameters[] { p.clone(),
            p.clone().setFloatingLeftBound(true),
            p.clone().setFloatingRightBound(true),
            p.clone().setFloatingLeftBound(true).setFloatingRightBound(true) };

    RandomDataGenerator rdi = new RandomDataGenerator(new Well19937c(127368647891L));
    final int baseSize = its(400, 2000);
    final int total = its(3000, 30000);
    final int threadCount = 20;
    int i, id;

    final NucleotideMutationModel mutationModel = MutationModels.getEmpiricalNucleotideMutationModel()
            .multiplyProbabilities(2.0);
    mutationModel.reseed(12343L);

    for (final KAlignerParameters parameters : params) {
        final KAligner aligner = new KAligner(parameters);

        final AtomicInteger correct = new AtomicInteger(0), incorrect = new AtomicInteger(0),
                miss = new AtomicInteger(0), scoreError = new AtomicInteger(0), random = new AtomicInteger(0);

        final List<NucleotideSequence> ncs = new ArrayList<>(baseSize);
        for (i = 0; i < baseSize; ++i) {
            NucleotideSequence reference = randomSequence(NucleotideSequence.ALPHABET, rdi, 100, 300);
            ncs.add(reference);
            aligner.addReference(reference);
        }

        final AtomicInteger counter = new AtomicInteger(total);
        Thread[] threads = new Thread[threadCount];
        final AtomicLong time = new AtomicLong(0L);
        final AtomicLong seedCounter = new AtomicLong(1273L);

        for (i = 0; i < threadCount; ++i) {
            threads[i] = new Thread() {
                @Override
                public void run() {
                    long timestamp;
                    // Different seed for different thread.
                    RandomDataGenerator rdi = new RandomDataGenerator(
                            new Well19937c(seedCounter.addAndGet(117L)));
                    while (counter.decrementAndGet() >= 0) {
                        int id = rdi.nextInt(0, baseSize - 1);
                        NucleotideSequence ref = ncs.get(id);
                        int trimRight, trimLeft;
                        boolean addLeft, addRight;

                        if (parameters.isFloatingLeftBound()) {
                            trimLeft = rdi.nextInt(0, ref.size() / 3);
                            addLeft = true;
                        } else {
                            if (rdi.nextInt(0, 1) == 0) {
                                trimLeft = 0;
                                addLeft = true;
                            } else {
                                trimLeft = rdi.nextInt(0, ref.size() / 3);
                                addLeft = false;
                            }
                        }

                        if (parameters.isFloatingRightBound()) {
                            trimRight = rdi.nextInt(0, ref.size() / 3);
                            addRight = true;
                        } else {
                            if (rdi.nextInt(0, 1) == 0) {
                                trimRight = 0;
                                addRight = true;
                            } else {
                                trimRight = rdi.nextInt(0, ref.size() / 3);
                                addRight = false;
                            }
                        }

                        NucleotideSequence subSeq = ref.getRange(trimLeft, ref.size() - trimRight);
                        NucleotideSequence left = addLeft
                                ? randomSequence(NucleotideSequence.ALPHABET, rdi, 10, 30) : EMPTY;
                        NucleotideSequence right = addRight
                                ? randomSequence(NucleotideSequence.ALPHABET, rdi, 10, 30) : EMPTY;

                        int[] subSeqMutations;
                        Mutations<NucleotideSequence> mmutations;
                        synchronized (mutationModel) {
                            mmutations = generateMutations(subSeq, mutationModel);
                            subSeqMutations = mmutations.getAllMutations();
                        }
                        float actionScore = AlignmentUtils.calculateScore(parameters.getScoring(),
                                subSeq.size(), mmutations);

                        int indels = 0;
                        for (int mut : subSeqMutations)
                            if (isDeletion(mut) || isInsertion(mut))
                                ++indels;

                        NucleotideSequence target = left.concatenate(mutate(subSeq, subSeqMutations))
                                .concatenate(right);

                        timestamp = System.nanoTime();
                        KAlignmentResult result = aligner.align(target);
                        time.addAndGet(System.nanoTime() - timestamp);

                        boolean found = false;
                        for (KAlignmentHit hit : result.hits) {
                            if (hit.getId() == id) {
                                found = true;
                                if (!parameters.isFloatingLeftBound())
                                    Assert.assertTrue(hit.getAlignment().getSequence1Range().getFrom() == 0
                                            || hit.getAlignment().getSequence2Range().getFrom() == 0);
                                if (!parameters.isFloatingRightBound())
                                    Assert.assertTrue(hit.getAlignment().getSequence1Range().getTo() == ref.size()
                                            || hit.getAlignment().getSequence2Range().getTo() == target.size());
                                if (hit.getAlignment().getScore() < actionScore
                                        && indels <= parameters.getMaxAdjacentIndels()) {
                                    scoreError.incrementAndGet();
                                }
                            } else {
                                incorrect.incrementAndGet();
                            }
                        }

                        if (found)
                            correct.incrementAndGet();
                        else if (indels <= parameters.getMaxAdjacentIndels())
                            miss.incrementAndGet();

                        NucleotideSequence randomSequence = randomSequence(NucleotideSequence.ALPHABET, rdi,
                                target.size() - 1, target.size());
                        for (KAlignmentHit hit : aligner.align(randomSequence).hits) {
                            hit.calculateAlignmnet();
                            if (hit.getAlignment().getScore() >= 110.0)
                                random.incrementAndGet();
                        }
                    }
                }
            };
        }

        for (i = 0; i < threadCount; ++i)
            threads[i].start();
        for (i = 0; i < threadCount; ++i)
            threads[i].join();

        System.out.println("C=" + correct.get() + ";I=" + incorrect.get() + ";M=" + miss.get() + ";ScE="
                + scoreError.get() + ";R=" + (1.0 * random.get() / baseSize / total) + " AlignmentTime = "
                + time(time.get() / total));

        Assert.assertEquals(1.0, 1.0 * correct.get() / total, 0.01);
        Assert.assertEquals(0.0, 1.0 * incorrect.get() / total, 0.001);
        Assert.assertEquals(0.0, 1.0 * miss.get() / total, 0.001);
        Assert.assertEquals(0.0, 1.0 * scoreError.get() / total, 0.001);
        Assert.assertEquals(0.0, 1.0 * random.get() / total / baseSize, 5E-6);
    }
}
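Two AtomicLong idioms appear in this test: time.addAndGet(...) accumulates per-alignment nanoseconds across 20 threads without a lock, and seedCounter.addAndGet(117L) hands each worker a distinct RNG seed. A condensed sketch of both (the worker body is illustrative stand-in work, not the aligner):

import java.util.Random;
import java.util.concurrent.atomic.AtomicLong;

public class TimedWorkers {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong totalNanos = new AtomicLong(0L);
        final AtomicLong seedCounter = new AtomicLong(1273L);
        Thread[] threads = new Thread[4];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(() -> {
                // Each thread atomically claims a distinct seed.
                Random rng = new Random(seedCounter.addAndGet(117L));
                long start = System.nanoTime();
                long checksum = 0;
                for (int j = 0; j < 1_000_000; j++)
                    checksum += rng.nextInt(10); // stand-in for real work
                // Lock-free accumulation of elapsed time across threads.
                totalNanos.addAndGet(System.nanoTime() - start);
            });
            threads[i].start();
        }
        for (Thread t : threads)
            t.join();
        System.out.println("total work time (ns): " + totalNanos.get());
    }
}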
From source file:com.facebook.presto.accumulo.tools.RewriteMetricsTask.java
private void incrementMetric(Map<Text, Map<Text, Map<ColumnVisibility, AtomicLong>>> rowMap, Text row,
        Text family, ColumnVisibility visibility) {
    Map<Text, Map<ColumnVisibility, AtomicLong>> familyMap = rowMap.get(row);
    if (familyMap == null) {
        familyMap = new HashMap<>();
        rowMap.put(row, familyMap);
    }

    // Increment column cardinality
    Map<ColumnVisibility, AtomicLong> visibilityMap = familyMap.get(family);
    if (visibilityMap == null) {
        visibilityMap = new HashMap<>();
        visibilityMap.put(new ColumnVisibility(), new AtomicLong(0));
        familyMap.put(family, visibilityMap);
    }

    if (visibilityMap.containsKey(visibility)) {
        visibilityMap.get(visibility).incrementAndGet();
    } else {
        visibilityMap.put(visibility, new AtomicLong(1));
    }
}
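AtomicLong doubles as a mutable map value here, avoiding the get-increment-put dance a plain Long would need. With Java 8's Map.computeIfAbsent the same counting pattern collapses to one line; a generic sketch, not the Presto code:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

public class MapCounter {
    public static void main(String[] args) {
        Map<String, AtomicLong> counts = new HashMap<>();
        for (String word : new String[] { "a", "b", "a", "a" }) {
            // Create the counter on first sight, then bump it in place.
            counts.computeIfAbsent(word, k -> new AtomicLong(0)).incrementAndGet();
        }
        System.out.println(counts); // e.g. {a=3, b=1}
    }
}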
From source file:com.amazon.kinesis.streaming.agent.tailing.FileTailer.java
private void emitStatus() {
    try {
        Map<String, Object> metrics = getMetrics();
        if (flow.logEmitInternalMetrics()) {
            try {
                ObjectMapper mapper = new ObjectMapper();
                LOGGER.info("{}: File Tailer Status: {}", serviceName(), mapper.writeValueAsString(metrics));
            } catch (JsonProcessingException e) {
                LOGGER.error("{}: Failed when emitting file tailer status metrics.", serviceName(), e);
            }
        }
        AtomicLong zero = new AtomicLong(0);
        long bytesBehind = Metrics.getMetric(metrics, Metrics.FILE_TAILER_BYTES_BEHIND_METRIC, 0L);
        int filesBehind = Metrics.getMetric(metrics, Metrics.FILE_TAILER_FILES_BEHIND_METRIC, 0);
        long bytesConsumed = Metrics.getMetric(metrics, Metrics.PARSER_TOTAL_BYTES_CONSUMED_METRIC, zero).get();
        long recordsParsed = Metrics.getMetric(metrics, Metrics.PARSER_TOTAL_RECORDS_PARSED_METRIC, zero).get();
        long recordsProcessed = Metrics.getMetric(metrics, Metrics.PARSER_TOTAL_RECORDS_PROCESSED_METRIC, zero)
                .get();
        long recordsSkipped = Metrics.getMetric(metrics, Metrics.PARSER_TOTAL_RECORDS_SKIPPED_METRIC, zero)
                .get();
        long recordsSent = Metrics.getMetric(metrics, Metrics.SENDER_TOTAL_RECORDS_SENT_METRIC, zero).get();
        LOGGER.info(
                "{}: Tailer Progress: Tailer has parsed {} records ({} bytes), transformed {} records, skipped {} records, and has successfully sent {} records to destination.",
                serviceName(), recordsParsed, bytesConsumed, recordsProcessed, recordsSkipped, recordsSent);
        String msg = String.format("%s: Tailer is %02f MB (%d bytes) behind.", serviceName(),
                bytesBehind / 1024 / 1024.0, bytesBehind);
        if (filesBehind > 0) {
            msg += String.format(" There are %d file(s) newer than current file(s) being tailed.", filesBehind);
        }
        if (bytesBehind >= Metrics.BYTES_BEHIND_WARN_LEVEL) {
            LOGGER.warn(msg);
        } else if (bytesBehind >= Metrics.BYTES_BEHIND_INFO_LEVEL || agentContext.logEmitInternalMetrics()) {
            LOGGER.info(msg);
        } else if (bytesBehind > 0) {
            LOGGER.debug(msg);
        }
    } catch (Exception e) {
        LOGGER.error("{}: Failed while emitting tailer status.", serviceName(), e);
    }
}
From source file:org.lendingclub.mercator.docker.SwarmScanner.java
public void scanTasksForSwarm(String swarmClusterId) {
    logger.info("scanning tasks for swarm: {}", swarmClusterId);

    AtomicLong earliestUpdate = new AtomicLong(Long.MAX_VALUE);
    AtomicBoolean error = new AtomicBoolean(false);
    JsonNode response = getRestClient().getTasks();
    response.forEach(it -> {
        try {
            earliestUpdate.set(Math.min(earliestUpdate.get(), saveTask(it)));
        } catch (Exception e) {
            logger.warn("problem updating task", e);
            error.set(true);
        }
    });
    if (!error.get()) {
        if (earliestUpdate.get() < System.currentTimeMillis()) {
            dockerScanner.getNeoRxClient().execCypher(
                    "match (x:DockerTask) where x.swarmClusterId={swarmClusterId} and x.updateTs<{cutoff} detach delete x",
                    "cutoff", earliestUpdate.get(), "swarmClusterId", swarmClusterId);
        }
    }
}
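The get()-then-set() minimum above is only safe because forEach runs on a single thread; under concurrent callers two threads could interleave and lose the smaller value. Java 8's updateAndGet retries a CAS loop internally and closes that race. A minimal sketch:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicMin {
    private final AtomicLong min = new AtomicLong(Long.MAX_VALUE);

    // updateAndGet retries internally on contention, so concurrent callers
    // cannot overwrite each other's smaller value the way get()/set() can.
    void offer(long candidate) {
        min.updateAndGet(previous -> Math.min(previous, candidate));
    }

    public static void main(String[] args) throws InterruptedException {
        AtomicMin tracker = new AtomicMin();
        Thread a = new Thread(() -> tracker.offer(42L));
        Thread b = new Thread(() -> tracker.offer(7L));
        a.start(); b.start();
        a.join(); b.join();
        System.out.println(tracker.min.get()); // 7
    }
}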
From source file:com.facebook.presto.accumulo.tools.RewriteIndex.java
private void setRowIdStatuses(Connector connector, AccumuloTable table, long timestamp,
        Multimap<ByteBuffer, Mutation> queryIndexEntries, Map<ByteBuffer, RowStatus> rowIdStatuses)
        throws TableNotFoundException {
    // Set ranges to all row IDs that we have no status for
    List<Range> queryRanges = queryIndexEntries.keySet().stream().filter(x -> !rowIdStatuses.containsKey(x))
            .map(x -> new Range(new Text(x.array()))).collect(Collectors.toList());

    if (queryRanges.size() == 0) {
        return;
    }

    BatchScanner scanner = connector.createBatchScanner(table.getFullTableName(), auths, 10);
    scanner.setRanges(queryRanges);

    IteratorSetting iteratorSetting = new IteratorSetting(Integer.MAX_VALUE, TimestampFilter.class);
    TimestampFilter.setEnd(iteratorSetting, timestamp, true);
    scanner.addScanIterator(iteratorSetting);
    scanner.addScanIterator(new IteratorSetting(1, FirstEntryInRowIterator.class));

    // Make a copy of all the row IDs we are querying on to back-fill collection
    Set<ByteBuffer> allRowIDs = new HashSet<>(queryIndexEntries.keySet());

    // Scan the data table, removing all known row IDs and setting their status to present
    Text text = new Text();
    for (Entry<Key, Value> entry : scanner) {
        ByteBuffer rowID = ByteBuffer.wrap(entry.getKey().getRow(text).copyBytes());
        allRowIDs.remove(rowID);

        // Assert that this entry is new
        if (rowIdStatuses.put(rowID, RowStatus.PRESENT) != null) {
            throw new RuntimeException(
                    format("Internal error, row %s already has status", new String(rowID.array(), UTF_8)));
        }
    }
    scanner.close();

    AtomicLong newlyAbsent = new AtomicLong(0);

    // Back-fill the absent map -- rows may already be flagged as absent
    allRowIDs.forEach(rowID -> {
        RowStatus existingStatus = rowIdStatuses.get(rowID);
        if (existingStatus == null) {
            newlyAbsent.incrementAndGet();
            rowIdStatuses.put(rowID, RowStatus.ABSENT);
        } else if (existingStatus == RowStatus.PRESENT) {
            throw new RuntimeException(format("Internal error, row %s already has PRESENT status",
                    new String(rowID.array(), UTF_8)));
        }
    });
}
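The `newlyAbsent` counter is an AtomicLong not for thread-safety but for the capture rule: a lambda passed to forEach may only reference effectively final locals, so a plain long cannot be incremented inside it. A minimal sketch of the workaround:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

public class LambdaCounter {
    public static void main(String[] args) {
        List<String> rows = Arrays.asList("r1", "r2", "r3");

        // long missing = 0; rows.forEach(r -> missing++);  // won't compile:
        // locals captured by a lambda must be effectively final.
        AtomicLong missing = new AtomicLong(0);
        rows.forEach(r -> {
            if (!r.startsWith("r1")) {
                missing.incrementAndGet(); // mutate through the final reference
            }
        });
        System.out.println(missing.get()); // 2
    }
}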
From source file:org.apache.hc.core5.benchmark.HttpBenchmark.java
private Results doExecute(final HttpAsyncRequester requester, final Stats stats) throws Exception {
    final URI requestUri = config.getUri();
    final HttpHost host = new HttpHost(requestUri.getScheme(), requestUri.getHost(), requestUri.getPort());

    final AtomicLong requestCount = new AtomicLong(config.getRequests());
    final HttpVersion version = HttpVersion.HTTP_1_1;

    final CountDownLatch completionLatch = new CountDownLatch(config.getConcurrencyLevel());
    final BenchmarkWorker[] workers = new BenchmarkWorker[config.getConcurrencyLevel()];
    for (int i = 0; i < workers.length; i++) {
        final HttpCoreContext context = HttpCoreContext.create();
        context.setProtocolVersion(version);
        final BenchmarkWorker worker = new BenchmarkWorker(requester, host, context, requestCount,
                completionLatch, stats, config);
        workers[i] = worker;
    }

    final long deadline = config.getTimeLimit() != null ? config.getTimeLimit().toMillis() : Long.MAX_VALUE;
    final long startTime = System.currentTimeMillis();

    for (int i = 0; i < workers.length; i++) {
        workers[i].execute();
    }

    completionLatch.await(deadline, TimeUnit.MILLISECONDS);

    if (config.getVerbosity() >= 3) {
        System.out.println("...done");
    }

    final long endTime = System.currentTimeMillis();

    for (int i = 0; i < workers.length; i++) {
        workers[i].releaseResources();
    }

    return new Results(stats.getServerName(), stats.getVersion(), host.getHostName(),
            host.getPort() > 0 ? host.getPort() : host.getSchemeName().equalsIgnoreCase("https") ? 443 : 80,
            requestUri.toASCIIString(), stats.getContentLength(), config.getConcurrencyLevel(),
            endTime - startTime, stats.getSuccessCount(), stats.getFailureCount(), stats.getKeepAliveCount(),
            stats.getTotalBytesRecv(), stats.getTotalBytesSent(), stats.getTotalContentLength());
}
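The shared requestCount is handed to every BenchmarkWorker so all workers draw from one request budget. A common way for workers to consume such a counter (the loop below is illustrative, not the hc-core5 implementation):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;

public class SharedBudget {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong remaining = new AtomicLong(100); // total request budget
        final int concurrency = 4;
        final CountDownLatch done = new CountDownLatch(concurrency);

        for (int i = 0; i < concurrency; i++) {
            new Thread(() -> {
                // decrementAndGet atomically claims one unit of work; once it
                // goes negative the budget is exhausted and the worker stops.
                while (remaining.decrementAndGet() >= 0) {
                    // issue one request here
                }
                done.countDown();
            }).start();
        }
        done.await();
        System.out.println("all workers finished");
    }
}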
From source file:net.dv8tion.jda.core.utils.PermissionUtil.java
/**
 * Gets the {@code long} representation of the effective permissions allowed for this {@link net.dv8tion.jda.core.entities.Member Member}
 * in this {@link net.dv8tion.jda.core.entities.Channel Channel}. This can be used in conjunction with
 * {@link net.dv8tion.jda.core.Permission#getPermissions(long) Permission.getPermissions(long)} to easily get a list of all
 * {@link net.dv8tion.jda.core.Permission Permissions} that this member can use in this {@link net.dv8tion.jda.core.entities.Channel Channel}.
 * <br>This functions very similarly to how {@link net.dv8tion.jda.core.entities.Role#getPermissionsRaw() Role.getPermissionsRaw()} does.
 *
 * @param channel
 *        The {@link net.dv8tion.jda.core.entities.Channel Channel} being checked.
 * @param member
 *        The {@link net.dv8tion.jda.core.entities.Member Member} whose permissions are being checked.
 *
 * @throws IllegalArgumentException
 *         if any of the provided parameters is {@code null}
 *         or the provided entities are not from the same guild
 *
 * @return The {@code long} representation of the effective permissions that this {@link net.dv8tion.jda.core.entities.Member Member}
 *         has in this {@link net.dv8tion.jda.core.entities.Channel Channel}.
 */
public static long getEffectivePermission(Channel channel, Member member) {
    Checks.notNull(channel, "Channel");
    Checks.notNull(member, "Member");
    Checks.check(channel.getGuild().equals(member.getGuild()),
            "Provided channel and provided member are not of the same guild!");

    if (member.isOwner()) {
        // Owner effectively has all permissions
        return Permission.ALL_PERMISSIONS;
    }

    long permission = getEffectivePermission(member);
    final long admin = Permission.ADMINISTRATOR.getRawValue();
    if (isApplied(permission, admin))
        return Permission.ALL_PERMISSIONS;

    AtomicLong allow = new AtomicLong(0);
    AtomicLong deny = new AtomicLong(0);
    getExplicitOverrides(channel, member, allow, deny);
    permission = apply(permission, allow.get(), deny.get());

    // When the permission to view the channel is not applied it is not granted;
    // this means that we have no access to this channel at all.
    final long viewChannel = Permission.VIEW_CHANNEL.getRawValue();
    return isApplied(permission, viewChannel) ? permission : 0;

    /*
     * Currently Discord does not implicitly grant permissions that the user can grant others,
     * so a user must receive an explicit override to gain a permission in a channel.
     * The disabled logic below used to grant full text/voice permissions when
     * MANAGE_PERMISSIONS or MANAGE_CHANNEL was applied:
     *
     * final long managePerms = Permission.MANAGE_PERMISSIONS.getRawValue();
     * final long manageChannel = Permission.MANAGE_CHANNEL.getRawValue();
     * if ((permission & (managePerms | manageChannel)) != 0) {
     *     // In channels, MANAGE_CHANNEL and MANAGE_PERMISSIONS grant full text/voice permissions
     *     permission |= Permission.ALL_TEXT_PERMISSIONS | Permission.ALL_VOICE_PERMISSIONS;
     * }
     */
}
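Here `allow` and `deny` act as out-parameters: Java passes primitives by value and a method can only return one of them, so getExplicitOverrides writes its two results into AtomicLong holders supplied by the caller. A stripped-down sketch of that pattern, with made-up permission bits and a simplified apply:

import java.util.concurrent.atomic.AtomicLong;

public class OutParams {
    // Writes two results into caller-supplied holders, since a Java method
    // cannot return two longs or modify a primitive argument in place.
    static void explicitOverrides(AtomicLong allow, AtomicLong deny) {
        allow.set(0b0110L); // hypothetical allowed-permission bits
        deny.set(0b1000L);  // hypothetical denied-permission bits
    }

    static long apply(long base, long allowBits, long denyBits) {
        return (base & ~denyBits) | allowBits; // deny first, then allow
    }

    public static void main(String[] args) {
        AtomicLong allow = new AtomicLong(0);
        AtomicLong deny = new AtomicLong(0);
        explicitOverrides(allow, deny);
        // base 0b1001 -> deny clears bit 3, allow adds bits 1-2 -> 0b0111
        System.out.println(Long.toBinaryString(apply(0b1001L, allow.get(), deny.get()))); // 111
    }
}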