List of usage examples for org.apache.commons.collections.map.LinkedMap#containsKey
public boolean containsKey(Object key)
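containsKey returns true if the map contains a mapping for the given key, false otherwise. Before the project examples, a minimal self-contained sketch of the call (the demo class and values are illustrative, not taken from any of the sources below):

import org.apache.commons.collections.map.LinkedMap;

public class LinkedMapContainsKeyDemo {

    public static void main(String[] args) {
        // LinkedMap is a hash map variant that preserves insertion order
        // (Commons Collections 3.x is non-generic, hence the raw type)
        LinkedMap map = new LinkedMap();
        map.put("alpha", Integer.valueOf(1));
        map.put("beta", Integer.valueOf(2));

        System.out.println(map.containsKey("alpha")); // true
        System.out.println(map.containsKey("gamma")); // false

        // The common guard pattern: only insert when the key is absent
        if (!map.containsKey("gamma")) {
            map.put("gamma", Integer.valueOf(3));
        }
    }
}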
From source file:edu.isi.pfindr.learn.util.PairsFileIO.java
public static LinkedMap readDistinctElementsIntoMap(String pairsFilename) {
    File pairsFile = new File(pairsFilename);
    LinkedMap phenotypeIndexMap = new LinkedMap();
    try {
        List<String> fileWithPairs = FileUtils.readLines(pairsFile); //Read one at a time to consume less memory
        int index = 0;
        for (String s : fileWithPairs) {
            //distinctElementsSet.add(s.split("\t")[0]);
            //distinctElementsSet.add(s.split("\t")[1]);
            if (!phenotypeIndexMap.containsKey(s.split("\t")[0])) {
                phenotypeIndexMap.put(s.split("\t")[0], index);
                index++;
            }
        }
        for (String s : fileWithPairs) {
            if (!phenotypeIndexMap.containsKey(s.split("\t")[1])) {
                phenotypeIndexMap.put(s.split("\t")[1], index);
                index++;
            }
        }
        System.out.println("Index " + index);
    } catch (IOException e) {
        System.out.println("Error while reading/writing file with pairs" + e.getMessage());
        e.printStackTrace();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return phenotypeIndexMap;
}
From source file:de.innovationgate.wgpublisher.webtml.EventScript.java
/**
 * Searches the fired events queue for events of a name, beginning at a specified index
 * @param name The event name to search for
 * @param index The start index in the queue
 * @return a list of found events
 * @throws WGAPIException
 */
private List<PortletEvent> findEventsOfName(String name, Long index) throws WGAPIException {

    List<PortletEvent> foundEvents = new ArrayList<PortletEvent>();
    HttpSession session = getPageContext().getSession();
    LinkedMap events = TMLPortlet.getFiredEventsQueue(session);
    if (events.size() == 0) {
        return foundEvents;
    }

    // Find the start index. This is either the index after the last processed index, or - if the last processed
    // index is not available in the queue - the first index in the queue.
    if (events.containsKey(index)) {
        index = (Long) events.nextKey(index);
    } else {
        index = (Long) events.firstKey();
    }

    synchronized (events) {
        PortletEvent event;
        while (index != null) {
            event = (PortletEvent) events.get(index);
            String targetPortletKey = event.getTargetPortletKey();
            if (targetPortletKey == null
                    || targetPortletKey.equals(getTMLContext().getportlet().getportletkey())) {
                if (event.getName().equalsIgnoreCase(name)) {
                    foundEvents.add(event);
                }
            }
            index = (Long) events.nextKey(index);
        }
    }
    return foundEvents;
}
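The containsKey/nextKey/firstKey combination above is a cursor-style scan over an insertion-ordered event queue. A stripped-down sketch of just that pattern (hypothetical demo class and event ids; LinkedMap implements OrderedMap, so firstKey and nextKey are available):

import org.apache.commons.collections.map.LinkedMap;

public class QueueCursorDemo {

    public static void main(String[] args) {
        // LinkedMap keeps keys in insertion order, so it can model a queue
        // indexed by event id
        LinkedMap events = new LinkedMap();
        events.put(Long.valueOf(10L), "event-10");
        events.put(Long.valueOf(11L), "event-11");
        events.put(Long.valueOf(13L), "event-13");

        // Resume after the last processed id if it is still queued,
        // otherwise start from the head of the queue
        Long lastProcessed = Long.valueOf(11L);
        Long cursor = events.containsKey(lastProcessed)
                ? (Long) events.nextKey(lastProcessed)
                : (Long) events.firstKey();

        // nextKey returns null past the last key, ending the scan
        while (cursor != null) {
            System.out.println(events.get(cursor)); // prints event-13
            cursor = (Long) events.nextKey(cursor);
        }
    }
}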
From source file:edu.isi.pfindr.learn.util.PairsFileIO.java
public void filterPairsThatExist(String inputFilePath1, String inputFilePath2) {
    // e.g. testdata (the data to check), traindata (original data)

    // Read the files
    List<String> phenotypeList1 = new ArrayList<String>();
    List<String> phenotypeList2 = new ArrayList<String>();

    // sure pairs
    LinkedMap surePairsAdjacencyMap = new LinkedMap();
    try {
        phenotypeList1 = FileUtils.readLines(new File(inputFilePath1));
        phenotypeList2 = FileUtils.readLines(new File(inputFilePath2));

        String[] lineArray;
        List<String> resultList = new ArrayList<String>();
        List<String> surePairsMapValue = null;
        System.out.println(phenotypeList2.size());

        // Construct a map of each phenotype and its neighbors for sure-pairs
        for (int i = 0; i < phenotypeList2.size(); i++) {
            lineArray = phenotypeList2.get(i).split("\t");
            surePairsMapValue = new ArrayList<String>();

            // If the first value already exists in the map, get its neighbor list
            if (surePairsAdjacencyMap.containsKey(lineArray[0])) {
                surePairsMapValue = (List<String>) surePairsAdjacencyMap.get(lineArray[0]);
            }
            // If the list does not already contain the second string, add it and put the list back
            if (!surePairsMapValue.contains(lineArray[1]))
                surePairsMapValue.add(lineArray[1]);
            surePairsAdjacencyMap.put(lineArray[0], surePairsMapValue);

            // In the same manner, update the adjacency of the second string
            surePairsMapValue = new ArrayList<String>();
            if (surePairsAdjacencyMap.containsKey(lineArray[1])) {
                surePairsMapValue = (List<String>) surePairsAdjacencyMap.get(lineArray[1]);
            }
            if (!surePairsMapValue.contains(lineArray[0]))
                surePairsMapValue.add(lineArray[0]);
            surePairsAdjacencyMap.put(lineArray[1], surePairsMapValue);
        }

        for (int i = 0; i < surePairsAdjacencyMap.size(); i++) {
            // LinkedMap.get(int) returns the key at that insertion index,
            // while get(Object) returns the mapped value
            System.out.println("Key : " + surePairsAdjacencyMap.get(i) + " Value : "
                    + ((List) surePairsAdjacencyMap.get(surePairsAdjacencyMap.get(i))).size());
        }

        // Now parse the new pairs file and check whether each pair already exists in the sure-pairs map
        boolean existsSurePairs = false;
        System.out.println(phenotypeList1.size());
        surePairsMapValue = new ArrayList<String>();
        for (int j = 0; j < phenotypeList1.size(); j++) {
            lineArray = phenotypeList1.get(j).split("\t");
            if (surePairsAdjacencyMap.containsKey(lineArray[0])) {
                surePairsMapValue = (List) surePairsAdjacencyMap.get(lineArray[0]);
                if (surePairsMapValue.contains(lineArray[1])) {
                    existsSurePairs = true;
                }
            } else if (surePairsAdjacencyMap.containsKey(lineArray[1])) {
                surePairsMapValue = (List) surePairsAdjacencyMap.get(lineArray[1]);
                if (surePairsMapValue.contains(lineArray[0])) {
                    existsSurePairs = true;
                }
            }
            // If the pair does not exist in the sure pairs, write it to the output file
            if (!existsSurePairs)
                resultList.add(String.format("%s\t%s\t%s", lineArray[0], lineArray[1], lineArray[2]));
            existsSurePairs = false;
        }
        String resultFilePath = inputFilePath1.split("\\.")[0] + "_filtered.txt";
        FileUtils.writeLines(new File(resultFilePath), resultList);
    } catch (IOException ioe) {
        ioe.printStackTrace();
    }
}
From source file:de.innovationgate.wgpublisher.webtml.portlet.TMLPortlet.java
public void prepareEventProcessing(Base tag) throws WGAPIException {

    TMLPortletState sessionContext = getState();
    LinkedMap list = TMLPortlet.getFiredEventsQueue(tag.getPageContext().getSession());

    // Look if the event queue proceeded since the last processed event
    if (list.size() > 0) {
        PortletEvent lastEvent = (PortletEvent) list.get(list.lastKey());
        if (lastEvent != null) {
            if (lastEvent.getIndex() > sessionContext.getLastProcessedEventIndex()) {

                // Find the start index for processing new events
                Long startIndex;
                Long lastProcessedIndex = new Long(sessionContext.getLastProcessedEventIndex());
                if (list.containsKey(lastProcessedIndex)) {
                    startIndex = (Long) list.nextKey(lastProcessedIndex);
                } else {
                    startIndex = (Long) list.firstKey();
                }

                // Set start index as WebTML option
                tag.getStatus().setOption(Base.OPTION_PORTLET_EVENT_STARTINDEX,
                        new Long(sessionContext.getLastProcessedEventIndex()), TMLOption.SCOPE_GLOBAL);

                // Update last processed event index to be the newest event's index
                sessionContext.setLastProcessedEventIndex(lastEvent.getIndex());
            }
        }
    }
}
From source file:org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBaseTest.java
@Test
@SuppressWarnings("unchecked")
public void testSnapshotStateWithCommitOnCheckpointsEnabled() throws Exception {

    // --------------------------------------------------------------------
    // prepare fake states
    // --------------------------------------------------------------------

    final HashMap<KafkaTopicPartition, Long> state1 = new HashMap<>();
    state1.put(new KafkaTopicPartition("abc", 13), 16768L);
    state1.put(new KafkaTopicPartition("def", 7), 987654321L);

    final HashMap<KafkaTopicPartition, Long> state2 = new HashMap<>();
    state2.put(new KafkaTopicPartition("abc", 13), 16770L);
    state2.put(new KafkaTopicPartition("def", 7), 987654329L);

    final HashMap<KafkaTopicPartition, Long> state3 = new HashMap<>();
    state3.put(new KafkaTopicPartition("abc", 13), 16780L);
    state3.put(new KafkaTopicPartition("def", 7), 987654377L);

    // --------------------------------------------------------------------

    final AbstractFetcher<String, ?> fetcher = mock(AbstractFetcher.class);
    when(fetcher.snapshotCurrentState()).thenReturn(state1, state2, state3);

    final LinkedMap pendingOffsetsToCommit = new LinkedMap();

    FlinkKafkaConsumerBase<String> consumer = getConsumer(fetcher, pendingOffsetsToCommit, true);
    StreamingRuntimeContext mockRuntimeContext = mock(StreamingRuntimeContext.class);
    when(mockRuntimeContext.isCheckpointingEnabled()).thenReturn(true); // enable checkpointing
    consumer.setRuntimeContext(mockRuntimeContext);

    assertEquals(0, pendingOffsetsToCommit.size());

    OperatorStateStore backend = mock(OperatorStateStore.class);

    TestingListState<Serializable> listState = new TestingListState<>();
    when(backend.getSerializableListState(Matchers.any(String.class))).thenReturn(listState);

    StateInitializationContext initializationContext = mock(StateInitializationContext.class);
    when(initializationContext.getOperatorStateStore()).thenReturn(backend);
    when(initializationContext.isRestored()).thenReturn(false, true, true, true);

    consumer.initializeState(initializationContext);
    consumer.open(new Configuration());

    // checkpoint 1
    consumer.snapshotState(new StateSnapshotContextSynchronousImpl(138, 138));

    HashMap<KafkaTopicPartition, Long> snapshot1 = new HashMap<>();
    for (Serializable serializable : listState.get()) {
        Tuple2<KafkaTopicPartition, Long> kafkaTopicPartitionLongTuple2 = (Tuple2<KafkaTopicPartition, Long>) serializable;
        snapshot1.put(kafkaTopicPartitionLongTuple2.f0, kafkaTopicPartitionLongTuple2.f1);
    }

    assertEquals(state1, snapshot1);
    assertEquals(1, pendingOffsetsToCommit.size());
    assertEquals(state1, pendingOffsetsToCommit.get(138L));

    // checkpoint 2
    consumer.snapshotState(new StateSnapshotContextSynchronousImpl(140, 140));

    HashMap<KafkaTopicPartition, Long> snapshot2 = new HashMap<>();
    for (Serializable serializable : listState.get()) {
        Tuple2<KafkaTopicPartition, Long> kafkaTopicPartitionLongTuple2 = (Tuple2<KafkaTopicPartition, Long>) serializable;
        snapshot2.put(kafkaTopicPartitionLongTuple2.f0, kafkaTopicPartitionLongTuple2.f1);
    }

    assertEquals(state2, snapshot2);
    assertEquals(2, pendingOffsetsToCommit.size());
    assertEquals(state2, pendingOffsetsToCommit.get(140L));

    // ack checkpoint 1
    consumer.notifyCheckpointComplete(138L);
    assertEquals(1, pendingOffsetsToCommit.size());
    assertTrue(pendingOffsetsToCommit.containsKey(140L));

    // checkpoint 3
    consumer.snapshotState(new StateSnapshotContextSynchronousImpl(141, 141));

    HashMap<KafkaTopicPartition, Long> snapshot3 = new HashMap<>();
    for (Serializable serializable : listState.get()) {
        Tuple2<KafkaTopicPartition, Long> kafkaTopicPartitionLongTuple2 = (Tuple2<KafkaTopicPartition, Long>) serializable;
        snapshot3.put(kafkaTopicPartitionLongTuple2.f0, kafkaTopicPartitionLongTuple2.f1);
    }

    assertEquals(state3, snapshot3);
    assertEquals(2, pendingOffsetsToCommit.size());
    assertEquals(state3, pendingOffsetsToCommit.get(141L));

    // ack checkpoint 3, subsumes number 2
    consumer.notifyCheckpointComplete(141L);
    assertEquals(0, pendingOffsetsToCommit.size());

    consumer.notifyCheckpointComplete(666); // invalid checkpoint
    assertEquals(0, pendingOffsetsToCommit.size());

    OperatorStateStore operatorStateStore = mock(OperatorStateStore.class);
    listState = new TestingListState<>();
    when(operatorStateStore.getOperatorState(Matchers.any(ListStateDescriptor.class))).thenReturn(listState);

    // create 500 snapshots
    for (int i = 100; i < 600; i++) {
        consumer.snapshotState(new StateSnapshotContextSynchronousImpl(i, i));
        listState.clear();
    }
    assertEquals(FlinkKafkaConsumerBase.MAX_NUM_PENDING_CHECKPOINTS, pendingOffsetsToCommit.size());

    // commit only the second last
    consumer.notifyCheckpointComplete(598);
    assertEquals(1, pendingOffsetsToCommit.size());

    // access invalid checkpoint
    consumer.notifyCheckpointComplete(590);

    // and the last
    consumer.notifyCheckpointComplete(599);
    assertEquals(0, pendingOffsetsToCommit.size());
}
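The test drives FlinkKafkaConsumerBase's checkpoint bookkeeping, which uses the LinkedMap as an insertion-ordered record of pending offset commits keyed by checkpoint id. A rough standalone sketch of that bookkeeping pattern, assuming a simple cap-and-evict policy (the class, names, and cap value are illustrative, not Flink's actual implementation):

import org.apache.commons.collections.map.LinkedMap;

public class PendingCommitsDemo {

    // Cap on remembered checkpoints, analogous to Flink's MAX_NUM_PENDING_CHECKPOINTS
    private static final int MAX_NUM_PENDING = 100;

    public static void main(String[] args) {
        LinkedMap pending = new LinkedMap();

        // On each snapshot: remember the offsets keyed by checkpoint id,
        // dropping the oldest entry once the cap is reached
        for (long checkpointId = 1; checkpointId <= 105; checkpointId++) {
            if (pending.size() >= MAX_NUM_PENDING) {
                pending.remove(0); // LinkedMap supports removal by insertion index
            }
            pending.put(Long.valueOf(checkpointId), "offsets-for-" + checkpointId);
        }

        // On checkpoint-complete: ignore unknown ids; otherwise commit this
        // checkpoint and drop it plus everything older (subsumed checkpoints)
        Long completedId = Long.valueOf(103L);
        if (pending.containsKey(completedId)) {
            int pos = pending.indexOf(completedId);
            for (int i = 0; i <= pos; i++) {
                pending.remove(0); // always remove the current head
            }
        }
        System.out.println(pending.size()); // 2 - only ids 104 and 105 remain
    }
}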