List of usage examples for org.joda.time.DateTimeZone.forTimeZone
public static DateTimeZone forTimeZone(TimeZone zone)
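A minimal, self-contained sketch of the method before the project snippets below: forTimeZone bridges a JDK java.util.TimeZone into a Joda-Time DateTimeZone. The zone ID "America/New_York" is purely illustrative; per the Joda-Time Javadoc, passing null returns the default zone.

import java.util.TimeZone;

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class ForTimeZoneExample {
    public static void main(String[] args) {
        // Convert a JDK TimeZone into its Joda-Time equivalent
        TimeZone jdkZone = TimeZone.getTimeZone("America/New_York");
        DateTimeZone jodaZone = DateTimeZone.forTimeZone(jdkZone);

        // A null argument yields the default DateTimeZone
        DateTimeZone defaultZone = DateTimeZone.forTimeZone(null);

        // Interpret "now" in the converted zone
        DateTime now = new DateTime(jodaZone);
        System.out.println(now + " [" + jodaZone.getID() + "]");
        System.out.println("Default zone: " + defaultZone.getID());
    }
}

Most of the examples that follow use the same pattern: obtain a time zone ID from configuration or a user record, convert it with forTimeZone, and construct a DateTime in that zone.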
From source file:com.auditbucket.engine.repo.neo4j.model.MetaHeaderNode.java
License:Open Source License
public MetaHeaderNode(String uniqueKey, @NotEmpty FortressUser createdBy, @NotEmpty MetaInputBean metaInput,
        @NotEmpty DocumentType documentType) throws DatagioException {
    this();
    metaKey = uniqueKey;
    this.fortress = (FortressNode) createdBy.getFortress();
    this.documentType = (documentType != null ? documentType.getName().toLowerCase() : "");
    callerRef = metaInput.getCallerRef();
    //if ( callerRef!=null )
    callerKeyRef = this.fortress.getId() + "." + documentType.getId() + "."
            + (callerRef != null ? callerRef : metaKey);
    if (name == null)
        this.name = (callerRef == null ? this.documentType : (this.documentType + "." + callerRef));
    this.description = metaInput.getDescription();
    indexName = MetaSearchSchema.parseIndex(this.fortress);
    Date when = metaInput.getWhen();
    if (when == null)
        fortressDate = new DateTime(dateCreated,
                DateTimeZone.forTimeZone(TimeZone.getTimeZone(this.fortress.getTimeZone()))).getMillis();
    else
        fortressDate = when.getTime();
    lastUpdated = fortressDate;
    this.createdBy = (FortressUserNode) createdBy;
    this.lastWho = (FortressUserNode) createdBy;
    this.event = metaInput.getEvent();
    this.suppressSearch(metaInput.isSearchSuppressed());
}
From source file:com.auditbucket.engine.repo.neo4j.model.MetaHeaderNode.java
License:Open Source License
@Override
@JsonIgnore
public DateTime getFortressDateCreated() {
    return new DateTime(fortressDate,
            DateTimeZone.forTimeZone(TimeZone.getTimeZone(fortress.getTimeZone())));
}
From source file:com.auditbucket.engine.repo.neo4j.model.TrackLogRelationship.java
License:Open Source License
public TrackLogRelationship(MetaHeader header, ChangeLog log, DateTime fortressWhen) {
    this();
    this.metaHeader = (MetaHeaderNode) header;
    this.changeLog = (ChangeLogNode) log;
    if (fortressWhen != null && fortressWhen.getMillis() != 0) {
        this.fortressWhen = fortressWhen.getMillis();
    } else {
        // "now" in the fortress default timezone
        this.fortressWhen = new DateTime(sysWhen,
                DateTimeZone.forTimeZone(TimeZone.getTimeZone(header.getFortress().getTimeZone())))
                        .getMillis();
    }
}
From source file:com.bbva.arq.devops.ae.mirrorgate.collectors.jira.service.JiraIssuesServiceImpl.java
License:Apache License
@Override
public Pageable<IssueDTO> getRecentIssues() {
    final Counter page = new Counter(PAGE_SIZE);
    String date = collectorStatusService.getLastExecutionDate()
            .toDateTime(DateTimeZone.forTimeZone(jiraTimeZone)).toString("yyyy-MM-dd HH:mm");
    String query = String.format(ISSUES_QUERY_PATTERN, date, issueTypes);
    LOGGER.info("-> Running Jira Query: {}", query);
    return (() -> {
        Promise<SearchResult> results = client.searchJql(query, PAGE_SIZE, page.inc(), null);
        return StreamSupport.stream(results.claim().getIssues().spliterator(), false).map(utils::map)
                .collect(Collectors.toList());
    });
}
From source file:com.bloom.runtime.Context.java
public void UpdateUserInfoStmt(String username, Map<String, Object> props_toupdate) throws Exception {
    if ((props_toupdate == null) || (props_toupdate.isEmpty())) {
        return;
    }
    MetaInfo.User u = (MetaInfo.User) get(username, EntityType.USER);
    if (u == null) {
        throw new RuntimeException("User " + username + " is not found.");
    }
    for (String key : props_toupdate.keySet()) {
        if (!key.equalsIgnoreCase("oldpassword")) {
            if ((key.equalsIgnoreCase("password")) || (key.equalsIgnoreCase("newpassword"))) {
                String providedOldPassword = (String) props_toupdate.get("oldpassword");
                if (providedOldPassword == null) {
                    throw new Exception("Must provide 'oldpassword'");
                }
                String userOldPasswordEncrypted = u.getEncryptedPassword();
                String providedOldPasswordEncrypted = WASecurityManager.encrypt(providedOldPassword,
                        u.uuid.toEightBytes());
                if (!userOldPasswordEncrypted.equals(providedOldPasswordEncrypted)) {
                    throw new Exception("Old password is incorrect");
                }
                String value = (String) props_toupdate.get(key);
                setUserAttributes(u, "password", value);
            } else {
                String value = (String) props_toupdate.get(key);
                setUserAttributes(u, key, value);
            }
        }
    }
    this.security_manager.updateUser(u, this.sessionID);
    if ((HazelcastSingleton.isClientMember()) && (Tungsten.currUserMetaInfo.getName().equals(u.getName()))) {
        TimeZone jtz = TimeZone.getTimeZone(u.getUserTimeZone());
        Tungsten.userTimeZone = u.getUserTimeZone().equals("") ? null : DateTimeZone.forTimeZone(jtz);
    }
}
From source file:com.bloom.runtime.Context.java
public synchronized AuthToken Connect(String uname, String password, String clusterName, String host)
        throws MetaDataRepositoryException {
    if ((clusterName != null) && (!clusterName.isEmpty())) {
        HazelcastInstance in = HazelcastSingleton.initIfPopulated(clusterName, host);
        if (in == null) {
            throw new MetaDataRepositoryException("Cluster " + clusterName + " not found");
        }
        boolean isCleared = MetadataRepository.getINSTANCE().clear(false);
        if (!isCleared) {
            logger.warn("Failed to clear the MDR when changing clusters.");
        }
        MonitorModel.resetDbConnection();
    }
    AuthToken session_id;
    try {
        String clientId = HazelcastSingleton.get().getLocalEndpoint().getUuid();
        session_id = this.security_manager.authenticate(uname, password, clientId, "Tungsten");
    } catch (Exception ex) {
        throw new MetaDataRepositoryException(ex.getLocalizedMessage());
    }
    if (session_id != null) {
        System.out.println("Successfully connected as " + uname);
        if (this.sessionID != null) {
            this.security_manager.logout(this.sessionID);
        }
        Tungsten.nodeIDToAuthToken.put(HazelcastSingleton.getNodeId(), session_id);
        Tungsten.checkAndCleanupAdhoc(Boolean.valueOf(false));
        Tungsten.setSessionQueue(WAQueue.getQueue("consoleQueue" + session_id));
        Tungsten.setQueuelistener(new WAQueue.Listener() {
            public void onItem(Object item) {
                if (Tungsten.isAdhocRunning.get() == true) {
                    Tungsten.prettyPrintEvent(item);
                }
            }
        });
        try {
            Tungsten.getSessionQueue().subscribeForTungsten(Tungsten.getQueuelistener());
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
        Tungsten.session_id = session_id;
        Tungsten.currUserMetaInfo = (MetaInfo.User) MetadataRepository.getINSTANCE()
                .getMetaObjectByName(EntityType.USER, "Global", uname, null, session_id);
        TimeZone jtz = TimeZone.getTimeZone(Tungsten.currUserMetaInfo.getUserTimeZone());
        Tungsten.userTimeZone = Tungsten.currUserMetaInfo.getUserTimeZone().equals("") ? null
                : DateTimeZone.forTimeZone(jtz);
        this.sessionID = session_id;
        this.curUser = getUser(uname);
        String curUsrNamespace = this.curUser.getDefaultNamespace();
        useNamespace(curUsrNamespace);
        ConsoleReader.clearHistory();
        return session_id;
    }
    System.out.println("Could not log in as " + uname);
    return null;
}
From source file:com.cloudera.recordservice.pig.PigHCatUtil.java
License:Apache License
/**
 * Converts object from Hive's value system to Pig's value system
 * see HCatBaseStorer#getJavaObj() for Pig->Hive conversion
 * @param o object from Hive value system
 * @return object in Pig value system
 */
public static Object extractPigObject(Object o,
        com.cloudera.recordservice.core.Schema.TypeDesc itemType) throws Exception {
    // Note that HCatRecordSerDe.serializePrimitiveField() will be called before this,
    // thus some type promotion/conversion may occur: e.g. Short to Integer. We should
    // refactor this so that it's happening in one place per module/product that we are
    // integrating with. All Pig conversion should be done here, etc.
    if (o == null) {
        return null;
    }
    Object result;
    switch (itemType.typeId) {
    case BOOLEAN:
        result = ((BooleanWritable) o).get();
        break;
    case TINYINT:
        result = ((ByteWritable) o).get();
        break;
    case SMALLINT:
        result = (int) ((ShortWritable) o).get();
        break;
    case INT:
        result = ((IntWritable) o).get();
        break;
    case BIGINT:
        result = ((LongWritable) o).get();
        break;
    case FLOAT:
        result = ((FloatWritable) o).get();
        break;
    case DOUBLE:
        result = ((DoubleWritable) o).get();
        break;
    case STRING:
    case VARCHAR:
    case CHAR:
        result = o.toString();
        break;
    case TIMESTAMP_NANOS:
        TimestampNanos timestampNanos = ((TimestampNanosWritable) o).get();
        // TODO: make sure this is correct
        result = new DateTime(timestampNanos.toTimeStamp(),
                DateTimeZone.forTimeZone(TimeZone.getTimeZone("GMT")));
        break;
    case DECIMAL:
        Decimal decimal = ((DecimalWritable) o).get();
        result = decimal.toBigDecimal();
        break;
    default:
        result = o;
        break;
    }
    return result;
}
From source file:com.daemon.Master.java
License:Open Source License
public static void main(String[] args) {
    // Set the time zone for the daemon to UTC
    TimeZone.setDefault(TimeZone.getTimeZone("UTC"));
    DateTimeZone.setDefault(DateTimeZone.forTimeZone(TimeZone.getTimeZone("UTC")));
    System.out.println(Localization.now() + " Master started.");
    Master m = null;
    try {
        // Start the master
        m = new Master();
        m.run();
    } catch (Exception ex) {
        if (m != null) {
            m.getLogger().logStackTrace(ex);
            System.err.println(
                    Localization.now() + " An error occurred. Consult Master.log for further information.");
        } else {
            System.err.println(Localization.now() + " An error occurred.");
            // Print stack trace to error stream, because there is no log file, yet.
            ex.printStackTrace();
        }
    }
}
From source file:com.daemon.Minion.java
License:Open Source License
public void run() {
    // Set the time zone for the minion to UTC
    TimeZone.setDefault(TimeZone.getTimeZone("UTC"));
    DateTimeZone.setDefault(DateTimeZone.forTimeZone(TimeZone.getTimeZone("UTC")));
    System.out.println(prependInfo("Started."));
    // Initialize error log
    _logger = LogManager.getLogger(_logFilename);
    // Minion logic
    // We do not want to work with the search term lists as given, but
    // want to store a lot of meta data for each search term, so we work
    // with the meta data lists instead. So we need to create and initialize
    // the new lists.
    List<SearchTermMetaData> shortTermMetaData = null;
    List<SearchTermMetaData> longTermMetaData = null;
    // Initialize map holding all tweets
    Map<SearchTerm, List<Status>> allTweetsMap = new HashMap<SearchTerm, List<Status>>();
    for (SearchTerm term : _searchTerms) {
        allTweetsMap.put(term, new LinkedList<Status>());
    }
    try {
        // Get the twitter object for this profile
        _twitter = _twitterProfile.getTwitterObject();
        // Convert the search terms to meta data search terms
        shortTermMetaData = convertSearchTerms(_shortTerms);
        longTermMetaData = convertSearchTerms(_longTerms);
        // We expect to fetch all tweets with one search for each short search term, so there
        // is no need for an enclosing while loop.
        MapUtil.fillupMap(allTweetsMap, fetchTweetsForSearchTerms(shortTermMetaData));
        // We expect the long search terms to run more than once. For every loop iteration
        // each search term in the long list gets one search, except for the newer search terms,
        // which get more search requests per loop iteration. See Minion.fillUpList(...).
        while (countFilteredSearchTerms(longTermMetaData) < longTermMetaData.size()
                && _twitterProfile.getUsedRateLimit() < _props.maxRateLimit) {
            MapUtil.fillupMap(allTweetsMap, fetchTweetsForSearchTerms(longTermMetaData));
        }
    } catch (TwitterException te) {
        // If there is something wrong with twitter, we are unable to do anything about it
        _logger.logStackTrace(te, _twitterProfile.getScreenName());
        System.err.println(prependInfo("Error during communicating with Twitter. Consult " + _logFilename
                + " for further information."));
    } catch (Exception cnfe) {
        _logger.logStackTrace(cnfe, _twitterProfile.getScreenName());
        System.err.println(prependInfo(
                "Cannot load the JDBC driver.\nConsult " + _logFilename + " for further information."));
    } finally {
        int countTweetsTotal = 0;
        // Used to count new tweets for the same search term (which can be split over many
        // search term meta data objects)
        Map<SearchTerm, Integer> searchTermMap = new HashMap<SearchTerm, Integer>();
        for (SearchTerm term : _searchTerms) {
            searchTermMap.put(term, 0);
        }
        // At the end of the session update each search term's interval length
        // and the count of newly fetched tweets
        // Short terms
        if (shortTermMetaData != null) {
            for (SearchTermMetaData metaData : shortTermMetaData) {
                updateIntervalLength(metaData);
                metaData.getSearchTerm().setLastFetchedTweetCount(metaData.getTweetCount());
                countTweetsTotal += metaData.getTweetCount();
                searchTermMap.put(metaData.getSearchTerm(),
                        searchTermMap.get(metaData.getSearchTerm()) + metaData.getTweetCount());
            }
        }
        // Long terms
        if (longTermMetaData != null) {
            for (SearchTermMetaData metaData : longTermMetaData) {
                updateIntervalLength(metaData);
                metaData.getSearchTerm().setLastFetchedTweetCount(metaData.getTweetCount());
                countTweetsTotal += metaData.getTweetCount();
                searchTermMap.put(metaData.getSearchTerm(),
                        searchTermMap.get(metaData.getSearchTerm()) + metaData.getTweetCount());
            }
        }
        // Output new tweets for search terms
        for (SearchTerm term : _searchTerms) {
            System.out.println(prependInfo("Fetched " + searchTermMap.get(term)
                    + " new tweet(s) since last search for term '" + term.getTerm() + "'."));
        }
        // Output for the user
        System.out.println(prependInfo("Fetched " + countTweetsTotal + " tweets in total"));
        System.out.println("    for " + _searchTerms.size() + " search term(s),");
        System.out.println("    in " + _numRequests + " requests.");
        // Inform master about finishing the work
        MessageType messageType = MessageType.MINION_FINISHED;
        // If this is a limited minion, the type changes
        if (_limitPerSearchTerm != _props.unlimitedRequestsPerSearchTerm) {
            messageType = MessageType.LIMITEDMINION_FINISHED;
        }
        // Create packages for each search term
        List<Package> tweetPackages = new LinkedList<Package>();
        for (Map.Entry<SearchTerm, List<Status>> entry : allTweetsMap.entrySet()) {
            // The date of the package is now
            tweetPackages.add(new Package(entry.getValue(), new SearchTerm(entry.getKey()), new DateTime()));
        }
        _master.update(this, new MinionData(messageType, _searchTerms, tweetPackages));
        // Clear the tweets map afterwards
        allTweetsMap.clear();
        System.out.println(prependInfo("Exited."));
    }
}
From source file:com.esofthead.mycollab.core.utils.TimezoneVal.java
License:Open Source License
public DateTimeZone getTimezone() {
    return DateTimeZone.forTimeZone(timezone);
}