List of usage examples for org.deeplearning4j.nn.multilayer.MultiLayerNetwork.rnnTimeStep
public INDArray rnnTimeStep(INDArray input)
From source file: org.ensor.fftmusings.rnn.GravesLSTMCharModellingExample.java
private static String sampleCharactersFromNetwork2(String initialization, MultiLayerNetwork net, CharacterIterator iter, Random rng, int charactersToSample) { StringBuilder sb = new StringBuilder(); //Set up initialization. If no initialization: use a random character if (initialization == null) { initialization = String.valueOf(iter.getRandomCharacter()); }// w w w. j a v a2 s.c o m //Create input for initialization INDArray initializationInput = Nd4j.zeros(iter.inputColumns()); char[] init = initialization.toCharArray(); for (int i = 0; i < init.length; i++) { int idx = iter.convertCharacterToIndex(init[i]); initializationInput.putScalar(new int[] { idx }, 1.0f); } net.rnnClearPreviousState(); INDArray output = net.rnnTimeStep(initializationInput); for (int i = 0; i < charactersToSample; i++) { //Set up next input (single time step) by sampling from previous output INDArray nextInput = Nd4j.zeros(iter.inputColumns()); //Output is a probability distribution. Sample from this for each example we want to generate, and add it to the new input double[] outputProbDistribution = new double[iter.totalOutcomes()]; for (int j = 0; j < outputProbDistribution.length; j++) { outputProbDistribution[j] = output.getDouble(j); } int sampledCharacterIdx = sampleFromDistribution(outputProbDistribution, rng); nextInput.putScalar(new int[] { sampledCharacterIdx }, 1.0f); //Prepare next time step input sb.append(iter.convertIndexToCharacter(sampledCharacterIdx)); //Add sampled character to StringBuilder (human readable output) output = net.rnnTimeStep(nextInput); //Do one time step of forward pass } return sb.toString(); }
From source file: org.ensor.fftmusings.rnn.qft.SampleFromLSTM.java
public static void main(String[] args) throws IOException, Exception { MultiLayerNetwork net = SampleLSTM.load(new File("data/smiths.15.rnn")); Random rng = new Random(12345); List<QuantizedSpectrum> outputList = new ArrayList<>(); net.rnnClearPreviousState();/*from w ww .j a v a2 s. c o m*/ QuantizedSpectrum qsInit = new QuantizedSpectrum(FFTProcess.FFT_WINDOW_SIZE); for (int j = 0; j < qsInit.size(); j++) { int magnitude = (int) (Math.random() * QuantizedSpectrum.MAGNITUDE_QUANTA - 1); int phase = (int) (Math.random() * QuantizedSpectrum.PHASE_QUANTA - 1); qsInit.setSample(j, magnitude, phase); } RNNInterface rnnInterface = new RNNInterface(rng); INDArray initializationInput = rnnInterface.toRNN(qsInit); INDArray output = net.rnnTimeStep(initializationInput); int samplesToMake = 256; for (int i = 0; i < samplesToMake; i++) { QuantizedSpectrum nextSpectrum = rnnInterface.toQS(output); INDArray nextInput = rnnInterface.toRNN(nextSpectrum); outputList.add(nextSpectrum); output = net.rnnTimeStep(nextInput); // output.mul(8); // rnnInterface.dumpOutput(output); } AudioFormat targetFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, 11025.0f, // Sample rate 16, // Sample size (bits) 2, // Channels 4, // Frame Size 11025.0f, // Frame Rate false); // Big-endian ConcatStream cs = new ConcatStream(); FFTProcess fftProcessor = new FFTProcess(FFTProcess.FFT_WINDOW_SIZE); for (QuantizedSpectrum qs : outputList) { Sample s0 = fftProcessor.quantizedInverseFFT(qs); Sample[] s = new Sample[2]; s[0] = s0; s[1] = s0; InputStream bais = WavIO.writeSamples(targetFormat, s); cs.addStream(bais, s[0].size()); } AudioInputStream outputAIS = new AudioInputStream(cs, targetFormat, cs.size()); AudioSystem.write(outputAIS, AudioFileFormat.Type.WAVE, new File(String.format("sample.wav"))); }
From source file: org.ensor.fftmusings.rnn2.GravesLSTMCharModellingExample.java
/** * Generate a sample from the network, given an (optional, possibly null) * initialization. Initialization can be used to 'prime' the RNN with a * sequence you want to extend/continue.<br> * Note that the initalization is used for all samples * * @param initialization String, may be null. If null, select a random * character as initialization for all samples * @param charactersToSample Number of characters to sample from network * (excluding initialization)//from w w w . j av a 2s . c o m * @param net MultiLayerNetwork with one or more GravesLSTM/RNN layers and a * softmax output layer * @param iter CharacterIterator. Used for going from indexes back to * characters */ private static String[] sampleCharactersFromNetwork(String initialization, MultiLayerNetwork net, CharacterIterator iter, Random rng, int charactersToSample, int numSamples) { //Set up initialization. If no initialization: use a random character if (initialization == null) { initialization = String.valueOf(iter.getRandomCharacter()); } //Create input for initialization INDArray initializationInput = Nd4j.zeros(numSamples, iter.inputColumns(), initialization.length()); char[] init = initialization.toCharArray(); for (int i = 0; i < init.length; i++) { int idx = iter.convertCharacterToIndex(init[i]); for (int j = 0; j < numSamples; j++) { initializationInput.putScalar(new int[] { j, idx, i }, 1.0f); } } StringBuilder[] sb = new StringBuilder[numSamples]; for (int i = 0; i < numSamples; i++) { sb[i] = new StringBuilder(initialization); } //Sample from network (and feed samples back into input) one character at a time (for all samples) //Sampling is done in parallel here net.rnnClearPreviousState(); INDArray output = net.rnnTimeStep(initializationInput); output = output.tensorAlongDimension(output.size(2) - 1, 1, 0); //Gets the last time step output for (int i = 0; i < charactersToSample; i++) { //Set up next input (single time step) by sampling from previous output INDArray nextInput = 
Nd4j.zeros(numSamples, iter.inputColumns()); //Output is a probability distribution. Sample from this for each example we want to generate, and add it to the new input for (int s = 0; s < numSamples; s++) { double[] outputProbDistribution = new double[iter.totalOutcomes()]; for (int j = 0; j < outputProbDistribution.length; j++) { outputProbDistribution[j] = output.getDouble(s, j); } int sampledCharacterIdx = sampleFromDistribution(outputProbDistribution, rng); nextInput.putScalar(new int[] { s, sampledCharacterIdx }, 1.0f); //Prepare next time step input sb[s].append(iter.convertIndexToCharacter(sampledCharacterIdx)); //Add sampled character to StringBuilder (human readable output) } output = net.rnnTimeStep(nextInput); //Do one time step of forward pass } String[] out = new String[numSamples]; for (int i = 0; i < numSamples; i++) { out[i] = sb[i].toString(); } return out; }