List of usage examples for org.deeplearning4j.models.embeddings.wordvectors WordVectorsImpl setLookupTable
public void setLookupTable(@NonNull WeightLookupTable lookupTable)
From source file: de.mpii.docsimilarity.mr.utils.io.WordVectorSerializer.java
License: Apache License
/**
 * Builds read-only word vectors from a pre-computed weight lookup table and
 * its matching vocabulary.
 *
 * @param table the weights to use
 * @param vocab the vocab to use
 * @return word vectors based on the given parameters
 */
public static WordVectors fromTableAndVocab(WeightLookupTable table, VocabCache vocab) {
    WordVectorsImpl result = new WordVectorsImpl();
    // The two setters are independent; order is irrelevant.
    result.setVocab(vocab);
    result.setLookupTable(table);
    return result;
}
From source file: de.mpii.docsimilarity.mr.utils.io.WordVectorSerializer.java
License: Apache License
/** * Load word vectors from the given pair * * @param pair/* w w w .j a va 2s. co m*/ * the given pair * @return a read only word vectors impl based on the given lookup table and vocab */ public static WordVectors fromPair(Pair<InMemoryLookupTable, VocabCache> pair) { WordVectorsImpl vectors = new WordVectorsImpl(); vectors.setLookupTable(pair.getFirst()); vectors.setVocab(pair.getSecond()); return vectors; }
From source file: edu.umd.umiacs.clip.tools.scor.WordVectorUtils.java
License: Apache License
public static WordVectors loadTxt(File vectorsFile, boolean... normalize) { AbstractCache cache = new AbstractCache<>(); INDArray arrays[] = lines(vectorsFile.toPath()).map(line -> line.split(" ")) .filter(fields -> fields.length > 2).map(split -> { VocabWord word = new VocabWord(1.0, split[0]); word.setIndex(cache.numWords()); cache.addToken(word);//from w ww . j av a 2s . c o m cache.addWordToIndex(word.getIndex(), split[0]); float[] vector = new float[split.length - 1]; range(1, split.length).parallel().forEach(i -> vector[i - 1] = parseFloat(split[i])); return Nd4j.create(vector); }).toArray(size -> new INDArray[size]); INDArray syn = Nd4j.vstack(arrays); InMemoryLookupTable lookupTable = new InMemoryLookupTable.Builder().vectorLength(arrays[0].columns()) .useAdaGrad(false).cache(cache).useHierarchicSoftmax(false).build(); Nd4j.clearNans(syn); if (normalize.length > 0 && normalize[0]) { syn.diviColumnVector(syn.norm2(1)); } lookupTable.setSyn0(syn); WordVectorsImpl vectors = new WordVectorsImpl(); vectors.setLookupTable(lookupTable); vectors.setVocab(cache); return vectors; }