Example usage for javax.sound.sampled AudioSystem NOT_SPECIFIED

List of usage examples for javax.sound.sampled AudioSystem NOT_SPECIFIED

Introduction

On this page you can find example usages of javax.sound.sampled.AudioSystem.NOT_SPECIFIED.

Prototype

public static final int NOT_SPECIFIED

To view the source code for javax.sound.sampled AudioSystem NOT_SPECIFIED, click the Source Link.

Document

An integer that stands for an unknown numeric value; its actual value is -1.
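As a quick orientation before the collected examples, here is a minimal sketch of the two recurring uses of the constant on this page: as an unknown frame count when constructing an AudioInputStream, and as a "use a default" buffer size when requesting an output line. The class name NotSpecifiedDemo and the concrete audio format are our own choices for illustration, not taken from the sources below.

import java.io.ByteArrayInputStream;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.SourceDataLine;

public class NotSpecifiedDemo {
    public static void main(String[] args) throws Exception {
        // 16 kHz, 16-bit, mono, signed PCM, little-endian.
        AudioFormat format = new AudioFormat(16000f, 16, 1, true, false);

        // Use 1: the total number of sample frames is unknown, so pass
        // NOT_SPECIFIED as the frame length of the stream.
        byte[] pcm = new byte[32000]; // one second of silence
        AudioInputStream stream = new AudioInputStream(
                new ByteArrayInputStream(pcm), format, AudioSystem.NOT_SPECIFIED);
        System.out.println("Frame length: " + stream.getFrameLength()); // -1, i.e. NOT_SPECIFIED

        // Use 2: let the implementation choose the line's internal buffer size
        // by passing NOT_SPECIFIED to DataLine.Info.
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, format, AudioSystem.NOT_SPECIFIED);
        // May throw LineUnavailableException on systems without an audio device.
        SourceDataLine line = (SourceDataLine) AudioSystem.getLine(info);
        System.out.println("Obtained line: " + line.getLineInfo());
    }
}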

Usage

From source file:marytts.tests.junit4.EnvironmentTest.java

@Test
public void testMP3Available() throws Exception {
    AudioFormat mp3af = new AudioFormat(new AudioFormat.Encoding("MPEG1L3"), AudioSystem.NOT_SPECIFIED,
            AudioSystem.NOT_SPECIFIED, 1, AudioSystem.NOT_SPECIFIED, AudioSystem.NOT_SPECIFIED, false);
    AudioInputStream waveStream = AudioSystem
            .getAudioInputStream(this.getClass().getResourceAsStream("test.wav"));
    // Now attempt conversion:
    if (MaryRuntimeUtils.canCreateMP3()) {
        assertTrue(AudioSystem.isConversionSupported(mp3af, waveStream.getFormat()));
        AudioInputStream mp3Stream = AudioSystem.getAudioInputStream(mp3af, waveStream);
    } else {
        assertFalse(AudioSystem.isConversionSupported(mp3af, waveStream.getFormat()));
    }
}

From source file:marytts.tools.perceptiontest.UtterancePlayRequestHandler.java

private void process(Address serverAddressAtClient, Map<String, String> queryItems, HttpResponse response) {

    boolean streamingAudio = true;
    AudioFileFormat.Type audioFileFormatType = AudioFileFormat.Type.WAVE;
    AudioFormat audioFormat = Voice.AF16000;
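    // The file length in sample frames is not yet known at this point, hence NOT_SPECIFIED.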
    AudioFileFormat audioFileFormat = new AudioFileFormat(audioFileFormatType, audioFormat,
            AudioSystem.NOT_SPECIFIED);

    final UtteranceRequest maryRequest = new UtteranceRequest(getId(), audioFileFormat, streamingAudio);
    // Process the request and send back the data
    boolean ok = true;
    //boolean okTest1 = queryItems.containsKey("EMAIL");
    //boolean okTest2 = queryItems.containsKey("PRESENT_SAMPLE_NUMBER");
    //boolean okTest3 = queryItems.containsKey("PERCEPTION_RESULT");

    boolean okTest = queryItems.containsKey("PRESENT_SAMPLE_NUMBER");

    //if(okTest1 && okTest2) {
    if (!okTest) {
        String message = "Problem reading input";
        logger.warn(message);
        MaryHttpServerUtils.errorInternalServerError(response, message, new Exception());
        ok = false;
    }

    if (ok) {
        int presentSample = Integer.parseInt(queryItems.get("PRESENT_SAMPLE_NUMBER"));
        final String waveFile = this.infoRH.getSampleWaveFile(presentSample);

        if (streamingAudio) {
            // Start two separate threads:
            // 1. one thread to process the request;
            new Thread("RH " + maryRequest.getId()) {
                public void run() {
                    Logger myLogger = MaryUtils.getLogger(this.getName());
                    try {
                        maryRequest.process(waveFile);
                        myLogger.info("Streaming request processed successfully.");
                    } catch (Throwable t) {
                        myLogger.error("Processing failed.", t);
                    }
                }
            }.start();

            // 2. one thread to take the audio data as it becomes available
            //    and write it into the ProducingNHttpEntity.
            // The second one does not depend on the first one practically,
            // because the AppendableSequenceAudioInputStream returned by
            // maryRequest.getAudio() was already created in the constructor of Request.
            AudioInputStream audio = maryRequest.getAudio();
            assert audio != null : "Streaming audio but no audio stream -- very strange indeed! :-(";
            AudioFileFormat.Type audioType = maryRequest.getAudioFileFormat().getType();
            AudioStreamNHttpEntity entity = new AudioStreamNHttpEntity(maryRequest);
            new Thread(entity, "HTTPWriter " + maryRequest.getId()).start();
            // entity knows its contentType, no need to set explicitly here.
            response.setEntity(entity);
            response.setStatusCode(HttpStatus.SC_OK);
            return;
        } else { // not streaming audio
            // Process input data to output data
            try {
                maryRequest.process(waveFile); // this may take some time
            } catch (Throwable e) {
                String message = "Processing failed.";
                logger.error(message, e);
                MaryHttpServerUtils.errorInternalServerError(response, message, e);
                ok = false;
            }
            if (ok) {
                // Write output data to client
                try {
                    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
                    maryRequest.writeOutputData(outputStream);
                    String contentType;
                    contentType = MaryHttpServerUtils.getMimeType(maryRequest.getAudioFileFormat().getType());
                    MaryHttpServerUtils.toHttpResponse(outputStream.toByteArray(), response, contentType);
                } catch (Exception e) {
                    String message = "Cannot write output";
                    logger.warn(message, e);
                    MaryHttpServerUtils.errorInternalServerError(response, message, e);
                    ok = false;
                }
            }
        }
    }

    if (ok)
        logger.info("Request handled successfully.");
    else
        logger.info("Request couldn't be handled successfully.");
    if (MaryRuntimeUtils.lowMemoryCondition()) {
        logger.info("Low memory condition detected (only " + MaryUtils.availableMemory()
                + " bytes left). Triggering garbage collection.");
        Runtime.getRuntime().gc();
        logger.info("After garbage collection: " + MaryUtils.availableMemory() + " bytes available.");
    }

}

From source file:be.tarsos.transcoder.ffmpeg.FFMPEGExecutor.java

public AudioInputStream pipe(Attributes attributes) throws EncoderException {
    String pipeEnvironment;
    String pipeArgument;
    File pipeLogFile;
    int pipeBuffer;

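    // Crude OS detection: any "Windows ..." os.name contains "indows" at an index > 0.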
    if (System.getProperty("os.name").indexOf("indows") > 0) {
        pipeEnvironment = "cmd.exe";
        pipeArgument = "/C";
    } else {
        pipeEnvironment = "/bin/bash";
        pipeArgument = "-c";
    }
    pipeLogFile = new File("decoder_log.txt");
    //buffer 1/4 second of audio.
    pipeBuffer = attributes.getSamplingRate() / 4;

    AudioFormat audioFormat = Encoder.getTargetAudioFormat(attributes);

    String command = toString();

    ProcessBuilder pb = new ProcessBuilder(pipeEnvironment, pipeArgument, command);

    pb.redirectError(Redirect.appendTo(pipeLogFile));

    LOG.fine("Starting piped decoding process");
    final Process process;
    try {
        process = pb.start();
    } catch (IOException e1) {
        throw new EncoderException("Problem starting piped sub process: " + e1.getMessage());
    }

    InputStream stdOut = new BufferedInputStream(process.getInputStream(), pipeBuffer);

    //read and ignore the 46 byte wav header, only pipe the pcm samples to the audioinputstream
    byte[] header = new byte[46];
    double sleepSeconds = 0;
    double timeoutLimit = 20; //seconds

    try {
        while (stdOut.available() < header.length) {
            try {
                Thread.sleep(100);
                sleepSeconds += 0.1;
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            if (sleepSeconds > timeoutLimit) {
                throw new Error("Could not read from pipe within " + timeoutLimit + " seconds: timeout!");
            }
        }
        int bytesRead = stdOut.read(header);
        if (bytesRead != header.length) {
            throw new EncoderException(
                    "Could not read complete WAV-header from pipe. This could result in mis-aligned frames!");
        }
    } catch (IOException e1) {
        throw new EncoderException("Problem reading from piped sub process: " + e1.getMessage());
    }

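    // The pipe delivers an open-ended stream of PCM samples, so the frame length is NOT_SPECIFIED.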
    final AudioInputStream audioStream = new AudioInputStream(stdOut, audioFormat, AudioSystem.NOT_SPECIFIED);

    //This thread waits for the end of the subprocess.
    new Thread(new Runnable() {
        public void run() {
            try {
                process.waitFor();
                LOG.fine("Finished piped decoding process");
            } catch (InterruptedException e) {
                LOG.severe("Interrupted while waiting for sub process exit.");
                e.printStackTrace();
            }
        }
    }, "Decoding Pipe Reader").start();
    return audioStream;
}

From source file:marytts.server.http.SynthesisRequestHandler.java

public void process(Address serverAddressAtClient, Map<String, String> queryItems, HttpResponse response) {
    if (queryItems == null || !(queryItems.containsKey("INPUT_TYPE") && queryItems.containsKey("OUTPUT_TYPE")
            && queryItems.containsKey("LOCALE") && queryItems.containsKey("INPUT_TEXT"))) {
        MaryHttpServerUtils.errorMissingQueryParameter(response,
                "'INPUT_TEXT' and 'INPUT_TYPE' and 'OUTPUT_TYPE' and 'LOCALE'");
        return;
    }

    String inputText = queryItems.get("INPUT_TEXT");

    MaryDataType inputType = MaryDataType.get(queryItems.get("INPUT_TYPE"));
    if (inputType == null) {
        MaryHttpServerUtils.errorWrongQueryParameterValue(response, "INPUT_TYPE", queryItems.get("INPUT_TYPE"),
                null);
        return;
    }

    MaryDataType outputType = MaryDataType.get(queryItems.get("OUTPUT_TYPE"));
    if (outputType == null) {
        MaryHttpServerUtils.errorWrongQueryParameterValue(response, "OUTPUT_TYPE",
                queryItems.get("OUTPUT_TYPE"), null);
        return;
    }
    boolean isOutputText = true;
    boolean streamingAudio = false;
    AudioFileFormat.Type audioFileFormatType = null;
    if (outputType.name().contains("AUDIO")) {
        isOutputText = false;
        String audioTypeName = queryItems.get("AUDIO");
        if (audioTypeName == null) {
            MaryHttpServerUtils.errorMissingQueryParameter(response, "'AUDIO' when OUTPUT_TYPE=AUDIO");
            return;
        }
        if (audioTypeName.endsWith("_STREAM")) {
            streamingAudio = true;
        }
        int lastUnderscore = audioTypeName.lastIndexOf('_');
        if (lastUnderscore != -1) {
            audioTypeName = audioTypeName.substring(0, lastUnderscore);
        }
        try {
            audioFileFormatType = MaryAudioUtils.getAudioFileFormatType(audioTypeName);
        } catch (Exception ex) {
            // ignore: audioFileFormatType stays null and is rejected below
        }
        if (audioFileFormatType == null) {
            MaryHttpServerUtils.errorWrongQueryParameterValue(response, "AUDIO", queryItems.get("AUDIO"), null);
            return;
        } else if (audioFileFormatType.toString().equals("MP3") && !MaryRuntimeUtils.canCreateMP3()) {
            MaryHttpServerUtils.errorWrongQueryParameterValue(response, "AUDIO", queryItems.get("AUDIO"),
                    "Conversion to MP3 not supported.");
            return;
        } else if (audioFileFormatType.toString().equals("Vorbis") && !MaryRuntimeUtils.canCreateOgg()) {
            MaryHttpServerUtils.errorWrongQueryParameterValue(response, "AUDIO", queryItems.get("AUDIO"),
                    "Conversion to OGG Vorbis format not supported.");
            return;
        }
    }
    // optionally, there may be output type parameters
    // (e.g., the list of features to produce for the output type TARGETFEATURES)
    String outputTypeParams = queryItems.get("OUTPUT_TYPE_PARAMS");

    Locale locale = MaryUtils.string2locale(queryItems.get("LOCALE"));
    if (locale == null) {
        MaryHttpServerUtils.errorWrongQueryParameterValue(response, "LOCALE", queryItems.get("LOCALE"), null);
        return;
    }

    Voice voice = null;
    String voiceName = queryItems.get("VOICE");
    if (voiceName != null) {
        if (voiceName.equals("male") || voiceName.equals("female")) {
            voice = Voice.getVoice(locale, new Voice.Gender(voiceName));
        } else {
            voice = Voice.getVoice(voiceName);
        }
        if (voice == null) {
            // a voice name was given but there is no such voice
            MaryHttpServerUtils.errorWrongQueryParameterValue(response, "VOICE", queryItems.get("VOICE"), null);
            return;
        }
    }
    if (voice == null) { // no voice tag -- use locale default if it exists.
        voice = Voice.getDefaultVoice(locale);
        logger.debug("No voice requested -- using default " + voice);
    }

    String style = queryItems.get("STYLE");
    if (style == null)
        style = "";

    String effects = toRequestedAudioEffectsString(queryItems);
    if (effects.length() > 0)
        logger.debug("Audio effects requested: " + effects);
    else
        logger.debug("No audio effects requested");

    String logMsg = queryItems.get("LOG");
    if (logMsg != null) {
        logger.info("Connection info: " + logMsg);
    }

    // Now, the parse is complete.

    // Construct audio file format -- even when output is not AUDIO,
    // in case we need to pass via audio to get our output type.
    if (audioFileFormatType == null) {
        audioFileFormatType = AudioFileFormat.Type.AU;
    }
    AudioFormat audioFormat;
    if (audioFileFormatType.toString().equals("MP3")) {
        audioFormat = MaryRuntimeUtils.getMP3AudioFormat();
    } else if (audioFileFormatType.toString().equals("Vorbis")) {
        audioFormat = MaryRuntimeUtils.getOggAudioFormat();
    } else if (voice != null) {
        audioFormat = voice.dbAudioFormat();
    } else {
        audioFormat = Voice.AF16000;
    }
    AudioFileFormat audioFileFormat = new AudioFileFormat(audioFileFormatType, audioFormat,
            AudioSystem.NOT_SPECIFIED);

    final Request maryRequest = new Request(inputType, outputType, locale, voice, effects, style, getId(),
            audioFileFormat, streamingAudio, outputTypeParams);

    // Process the request and send back the data
    boolean ok = true;
    try {
        maryRequest.setInputData(inputText);
        logger.info("Read: " + inputText);
    } catch (Exception e) {
        String message = "Problem reading input";
        logger.warn(message, e);
        MaryHttpServerUtils.errorInternalServerError(response, message, e);
        ok = false;
    }
    if (ok) {
        if (streamingAudio) {
            // Start two separate threads:
            // 1. one thread to process the request;
            new Thread("RH " + maryRequest.getId()) {
                public void run() {
                    Logger myLogger = MaryUtils.getLogger(this.getName());
                    try {
                        maryRequest.process();
                        myLogger.info("Streaming request processed successfully.");
                    } catch (Throwable t) {
                        myLogger.error("Processing failed.", t);
                    }
                }
            }.start();

            // 2. one thread to take the audio data as it becomes available
            //    and write it into the ProducingNHttpEntity.
            // The second one does not depend on the first one practically,
            // because the AppendableSequenceAudioInputStream returned by
            // maryRequest.getAudio() was already created in the constructor of Request.
            AudioInputStream audio = maryRequest.getAudio();
            assert audio != null : "Streaming audio but no audio stream -- very strange indeed! :-(";
            AudioFileFormat.Type audioType = maryRequest.getAudioFileFormat().getType();
            AudioStreamNHttpEntity entity = new AudioStreamNHttpEntity(maryRequest);
            new Thread(entity, "HTTPWriter " + maryRequest.getId()).start();
            // entity knows its contentType, no need to set explicitly here.
            response.setEntity(entity);
            response.setStatusCode(HttpStatus.SC_OK);
            return;
        } else { // not streaming audio
            // Process input data to output data
            try {
                maryRequest.process(); // this may take some time
            } catch (Throwable e) {
                String message = "Processing failed.";
                logger.error(message, e);
                MaryHttpServerUtils.errorInternalServerError(response, message, e);
                ok = false;
            }
            if (ok) {
                // Write output data to client
                try {
                    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
                    maryRequest.writeOutputData(outputStream);
                    String contentType;
                    if (maryRequest.getOutputType().isXMLType() || maryRequest.getOutputType().isTextType()) //text output
                        contentType = "text/plain; charset=UTF-8";
                    else //audio output
                        contentType = MaryHttpServerUtils
                                .getMimeType(maryRequest.getAudioFileFormat().getType());
                    MaryHttpServerUtils.toHttpResponse(outputStream.toByteArray(), response, contentType);
                } catch (Exception e) {
                    String message = "Cannot write output";
                    logger.warn(message, e);
                    MaryHttpServerUtils.errorInternalServerError(response, message, e);
                    ok = false;
                }
            }
        }
    }

    if (ok)
        logger.info("Request handled successfully.");
    else
        logger.info("Request couldn't be handled successfully.");
    if (MaryRuntimeUtils.lowMemoryCondition()) {
        logger.info("Low memory condition detected (only " + MaryUtils.availableMemory()
                + " bytes left). Triggering garbage collection.");
        Runtime.getRuntime().gc();
        logger.info("After garbage collection: " + MaryUtils.availableMemory() + " bytes available.");
    }
}

From source file:com.limegroup.gnutella.gui.mp3.BasicPlayer.java

/**
 * Inits a DataLine.<br>
 *
 * We check if the line supports Volume and Pan controls.
 *
 * From the AudioInputStream, i.e. from the sound file, we
 * fetch information about the format of the audio data. This
 * information includes the sampling frequency, the number of
 * channels and the size of the samples, and is needed to ask
 * JavaSound for a suitable output line for this audio file.
 * Furthermore, we have to give JavaSound a hint about how
 * big the internal buffer for the line should be. Here,
 * we say AudioSystem.NOT_SPECIFIED, signaling that we don't
 * care about the exact size. JavaSound will use some default
 * value for the buffer size.
 */
private void createLine() throws LineUnavailableException {
    if (m_line == null) {
        AudioFormat sourceFormat = m_audioInputStream.getFormat();
        if (LOG.isDebugEnabled())
            LOG.debug("Source format : " + sourceFormat);
        AudioFormat targetFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                sourceFormat.getSampleRate(), 16, sourceFormat.getChannels(), sourceFormat.getChannels() * 2,
                sourceFormat.getSampleRate(), false);

        if (LOG.isDebugEnabled())
            LOG.debug("Target format: " + targetFormat);
        m_audioInputStream = AudioSystem.getAudioInputStream(targetFormat, m_audioInputStream);
        AudioFormat audioFormat = m_audioInputStream.getFormat();
        if (LOG.isDebugEnabled())
            LOG.debug("Create Line : " + audioFormat);
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat, AudioSystem.NOT_SPECIFIED);
        m_line = (SourceDataLine) AudioSystem.getLine(info);

        /*-- Display supported controls --*/
        Control[] c = m_line.getControls();
        for (int p = 0; p < c.length; p++) {
            if (LOG.isDebugEnabled())
                LOG.debug("Controls : " + c[p].toString());
        }
        /*-- Is Gain Control supported ? --*/
        if (m_line.isControlSupported(FloatControl.Type.MASTER_GAIN)) {
            m_gainControl = (FloatControl) m_line.getControl(FloatControl.Type.MASTER_GAIN);
            if (LOG.isDebugEnabled())
                LOG.debug("Master Gain Control : [" + m_gainControl.getMinimum() + ","
                        + m_gainControl.getMaximum() + "]," + m_gainControl.getPrecision());
        }

        /*-- Is Pan control supported ? --*/
        if (m_line.isControlSupported(FloatControl.Type.PAN)) {
            m_panControl = (FloatControl) m_line.getControl(FloatControl.Type.PAN);
            if (LOG.isDebugEnabled())
                LOG.debug("Pan Control : [" + m_panControl.getMinimum() + "," + m_panControl.getMaximum() + "],"
                        + m_panControl.getPrecision());
        }
    }
}

From source file:edu.mit.csail.sls.wami.relay.WamiRelay.java

/**
 * This delegates recognition to the {@link IRecognizer} associated with
 * this relay, providing the appropriate callbacks
 *
 * @param audioIn
 *            The audio input stream to recognize
 * @throws RecognizerException
 *             On recognition error
 * @throws IOException
 *             On error reading from the audioIn stream
 */
public void recognize(AudioInputStream audioIn) throws RecognizerException, IOException {
    final ByteArrayOutputStream audioByteStream = new ByteArrayOutputStream();
    final AudioFormat audioFormat = audioIn.getFormat();
    TeeInputStream tee = new TeeInputStream(audioIn, audioByteStream, true);
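    // The forked stream's length is not known up front, so pass NOT_SPECIFIED as the frame count.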
    AudioInputStream forkedStream = new AudioInputStream(tee, audioIn.getFormat(), AudioSystem.NOT_SPECIFIED);

    if (recognizer == null) {
        throw new RecognizerException("No recognizer specified!");
    } else if (wamiApp == null) {
        throw new RecognizerException("No wami app specified!");
    }

    recognizer.recognize(forkedStream, new IRecognitionListener() {
        private long startedTimestamp;

        public void onRecognitionResult(final IRecognitionResult result) {
            // if the result is final, then before we delegate it
            // we switch over our audio stream so that
            // getLastRecordedAudio() works properly inside of
            // on RecognitionResult
            long timestampMillis = System.currentTimeMillis();
            if (!result.isIncremental()) {
                try {
                    audioByteStream.close();
                } catch (IOException e) {
                    e.printStackTrace(); // shouldn't occur
                }

                synchronized (lastAudioLock) {
                    lastAudioBytes = audioByteStream.toByteArray();
                    lastAudioFormat = audioFormat;
                }
            }
            wamiApp.onRecognitionResult(result);
            logEvent(result, timestampMillis);

            if (!result.isIncremental()) {
                logUtterance(audioByteStream.toByteArray(), audioFormat, startedTimestamp);
            }
        }

        public void onRecognitionStarted() {
            startedTimestamp = System.currentTimeMillis();
            logEvent(new RecognitionStartedLogEvent(), startedTimestamp);
            wamiApp.onRecognitionStarted();
        }

    });
}

From source file:com.player.BasicMP3Player.java

/**
 * Inits a DataLine.<br>
 * We check if the line supports Gain and Pan controls. From the AudioInputStream, i.e. from the
 * sound file, we fetch information about the format of the audio data. This information includes
 * the sampling frequency, the number of channels and the size of the samples, and is needed to
 * ask JavaSound for a suitable output line for this audio file. Furthermore, we have to give
 * JavaSound a hint about how big the internal buffer for the line should be. Here, we say
 * AudioSystem.NOT_SPECIFIED, signaling that we don't care about the exact size. JavaSound will
 * use some default value for the buffer size.
 */
private void createLine() throws LineUnavailableException {
    log.info("Create Line");
    if (m_line == null) {
        AudioFormat sourceFormat = m_audioInputStream.getFormat();
        log.info("Create Line : Source format : " + sourceFormat.toString());
        AudioFormat targetFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                sourceFormat.getSampleRate(), 16, sourceFormat.getChannels(), sourceFormat.getChannels() * 2,
                sourceFormat.getSampleRate(), false);
        log.info("Create Line : Target format: " + targetFormat);
        // Keep a reference on encoded stream to progress notification.
        m_encodedaudioInputStream = m_audioInputStream;
        try {
            // Get total length in bytes of the encoded stream.
            encodedLength = m_encodedaudioInputStream.available();
        } catch (IOException e) {
            log.error("Cannot get m_encodedaudioInputStream.available()", e);
        }
        // Create decoded stream.
        m_audioInputStream = AudioSystem.getAudioInputStream(targetFormat, m_audioInputStream);
        AudioFormat audioFormat = m_audioInputStream.getFormat();
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat, AudioSystem.NOT_SPECIFIED);
        m_line = (SourceDataLine) AudioSystem.getLine(info);

        /*-- Display supported controls --*/
        Control[] c = m_line.getControls();
        for (int p = 0; p < c.length; p++) {
            log.debug("Controls : " + c[p].toString());
        }

        /*-- Is Gain Control supported ? --*/
        if (m_line.isControlSupported(FloatControl.Type.MASTER_GAIN)) {
            m_gainControl = (FloatControl) m_line.getControl(FloatControl.Type.MASTER_GAIN);
            log.info("Master Gain Control : [" + m_gainControl.getMinimum() + "," + m_gainControl.getMaximum()
                    + "] " + m_gainControl.getPrecision());
        }

        /*-- Is Pan control supported ? --*/
        if (m_line.isControlSupported(FloatControl.Type.PAN)) {
            m_panControl = (FloatControl) m_line.getControl(FloatControl.Type.PAN);
            log.info("Pan Control : [" + m_panControl.getMinimum() + "," + m_panControl.getMaximum() + "] "
                    + m_panControl.getPrecision());
        }
    }
}

From source file:BasicPlayer.java

/**
 * Inits a DataLine.<br>
 *
 * We check if the line supports Gain and Pan controls.
 *
 * From the AudioInputStream, i.e. from the sound file, we
 * fetch information about the format of the audio data. This
 * information includes the sampling frequency, the number of
 * channels and the size of the samples, and is needed to ask
 * JavaSound for a suitable output line for this audio file.
 * Furthermore, we have to give JavaSound a hint about how
 * big the internal buffer for the line should be. Here,
 * we say AudioSystem.NOT_SPECIFIED, signaling that we don't
 * care about the exact size. JavaSound will use some default
 * value for the buffer size.
 */
protected void createLine() throws LineUnavailableException {
    log.info("Create Line");
    if (m_line == null) {
        AudioFormat sourceFormat = m_audioInputStream.getFormat();
        log.info("Create Line : Source format : " + sourceFormat.toString());
        int nSampleSizeInBits = sourceFormat.getSampleSizeInBits();
        if (nSampleSizeInBits <= 0)
            nSampleSizeInBits = 16;
        if ((sourceFormat.getEncoding() == AudioFormat.Encoding.ULAW)
                || (sourceFormat.getEncoding() == AudioFormat.Encoding.ALAW))
            nSampleSizeInBits = 16;
        if (nSampleSizeInBits != 8)
            nSampleSizeInBits = 16;
        AudioFormat targetFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                sourceFormat.getSampleRate(), nSampleSizeInBits, sourceFormat.getChannels(),
                sourceFormat.getChannels() * (nSampleSizeInBits / 8), sourceFormat.getSampleRate(), false);
        log.info("Create Line : Target format: " + targetFormat);
        // Keep a reference on encoded stream to progress notification.
        m_encodedaudioInputStream = m_audioInputStream;
        try {
            // Get total length in bytes of the encoded stream.
            encodedLength = m_encodedaudioInputStream.available();
        } catch (IOException e) {
            log.error("Cannot get m_encodedaudioInputStream.available()", e);
        }
        // Create decoded stream.
        m_audioInputStream = AudioSystem.getAudioInputStream(targetFormat, m_audioInputStream);
        AudioFormat audioFormat = m_audioInputStream.getFormat();
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat, AudioSystem.NOT_SPECIFIED);
        Mixer mixer = getMixer(m_mixerName);
        if (mixer != null) {
            log.info("Mixer : " + mixer.getMixerInfo().toString());
            m_line = (SourceDataLine) mixer.getLine(info);
        } else {
            m_line = (SourceDataLine) AudioSystem.getLine(info);
            m_mixerName = null;
        }
        log.info("Line : " + m_line.toString());
        log.debug("Line Info : " + m_line.getLineInfo().toString());
        log.debug("Line AudioFormat: " + m_line.getFormat().toString());
    }
}

From source file:com.limegroup.gnutella.gui.mp3.BasicPlayer.java

/**
 * Main loop.
 *
 * Player Status == STOPPED => End of Thread + Freeing Audio Resources.<br>
 * Player Status == PLAYING => Audio stream data sent to Audio line.<br>
 * Player Status == PAUSED => Waiting for another status.
 */
public void run() {
    LOG.debug("Thread Running");
    //if (m_audioInputStream.markSupported()) m_audioInputStream.mark(m_audioFileFormat.getByteLength());
    //else trace(1,getClass().getName(), "Mark not supported");
    int nBytesRead = 1;
    m_status = PLAYING;
    int nBytesCursor = 0;
    byte[] abData = new byte[EXTERNAL_BUFFER_SIZE];
    float nFrameSize = (float) m_line.getFormat().getFrameSize();
    float nFrameRate = m_line.getFormat().getFrameRate();
    float bytesPerSecond = nFrameSize * nFrameRate;
    int secondsTotal = Math.round((float) m_audioFileFormat.getByteLength() / bytesPerSecond);
    try {
        AudioMetaData amd = AudioMetaData.parseAudioFile(_file);
        if (amd != null)
            secondsTotal = amd.getLength();
    } catch (IOException ignored) {
    }

    fireSeekSetupRequired(secondsTotal);
    try {
        while ((nBytesRead != -1) && (m_status != STOPPED)) {
            if (m_status == PLAYING) {
                try {
                    if (doSeek > -1) {
                        // Seek implementation. WAV format only !
                        if ((getAudioFileFormat() != null)
                                && (getAudioFileFormat().getType().toString().startsWith("WAV"))) {
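                            // Seeking is attempted only when the total duration is known (not NOT_SPECIFIED) and positive.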
                            if ((secondsTotal != AudioSystem.NOT_SPECIFIED) && (secondsTotal > 0)) {
                                m_line.flush();
                                m_line.stop();
                                //m_audioInputStream.reset();
                                m_audioInputStream.close();
                                m_audioInputStream = AudioSystem.getAudioInputStream(_file);
                                nBytesCursor = 0;
                                if (m_audioFileFormat.getByteLength() - doSeek < abData.length)
                                    doSeek = m_audioFileFormat.getByteLength() - abData.length;
                                doSeek = doSeek - doSeek % 4;
                                int toSkip = (int) doSeek;
                                // skip(...) instead of read(...) runs out of memory ?!
                                while ((toSkip > 0) && (nBytesRead > 0)) {
                                    if (toSkip > abData.length)
                                        nBytesRead = m_audioInputStream.read(abData, 0, abData.length);
                                    else
                                        nBytesRead = m_audioInputStream.read(abData, 0, toSkip);
                                    toSkip = toSkip - nBytesRead;
                                    nBytesCursor = nBytesCursor + nBytesRead;
                                }
                                m_line.start();
                            } else {
                                if (LOG.isDebugEnabled())
                                    LOG.debug("Seek not supported for this InputStream : " + secondsTotal);
                            }
                        } else {
                            if (LOG.isDebugEnabled())
                                LOG.debug("Seek not supported for this InputStream : " + secondsTotal);
                        }
                        doSeek = -1;
                    }
                    nBytesRead = m_audioInputStream.read(abData, 0, abData.length);
                } catch (Exception e) {
                    if (LOG.isDebugEnabled())
                        LOG.debug("InputStream error : (" + nBytesRead + ")", e);
                    e.printStackTrace();
                    m_status = STOPPED;
                }
                if (nBytesRead >= 0) {
                    // make sure that you are writing an integral number of the
                    // frame size (nFrameSize).  i think this may skip a few
                    // frames but probably not a big deal.
                    if (nBytesRead % nFrameSize != 0)
                        nBytesRead -= (nBytesRead % nFrameSize);
                    int nBytesWritten = m_line.write(abData, 0, nBytesRead);
                    nBytesCursor = nBytesCursor + nBytesWritten;
                    m_framesRead = ((int) Math.round((float) nBytesCursor / bytesPerSecond));
                }
            } else {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    LOG.debug("can't sleep", e);
                }
            }
        }

    } finally {
        // close the file and free the audio line.
        try {
            if (m_line != null) {
                try {
                    m_line.drain();
                    m_line.stop();
                } finally {
                    try {
                        m_line.close();
                    } catch (SecurityException ignored) {
                        LOG.trace("Cannot Free Audio ressources", ignored);
                    }
                    m_line = null;
                }
            }
        } finally {
            if (m_audioInputStream != null)
                try {
                    m_audioInputStream.close();
                } catch (IOException ignored) {
                }
        }
    }
    LOG.trace("Thread Stopped");
    firePlayComplete();
    m_status = READY;
}

From source file:marytts.server.http.MivoqSynthesisRequestHandler.java

public void process(Address serverAddressAtClient, Map<String, String> queryItems, HttpResponse response) {
    if (queryItems == null || !(queryItems.containsKey("input[type]") && queryItems.containsKey("output[type]")
            && queryItems.containsKey("input[content]"))) {
        MaryHttpServerUtils.errorMissingQueryParameter(response,
                "'input[content]' and 'input[type]' and 'output[type]'");
        return;
    }

    String inputContent = queryItems.get("input[content]");
    String inputTypeString = queryItems.get("input[type]");
    String outputTypeString = queryItems.get("output[type]");
    MaryDataType inputType = MaryDataType.get(inputTypeString);
    if (inputType == null) {
        MaryHttpServerUtils.errorWrongQueryParameterValue(response, "input[type]", inputTypeString, null);
        return;
    }

    MaryDataType outputType = MaryDataType.get(outputTypeString);
    if (outputType == null) {
        MaryHttpServerUtils.errorWrongQueryParameterValue(response, "output[type]", outputTypeString, null);
        return;
    }

    if (inputType.isTextType()) {
        if (!queryItems.containsKey("input[locale]")) {
            MaryHttpServerUtils.errorMissingQueryParameter(response,
                    "'input[locale]', needed for input[type] = " + inputTypeString);
        }
    }
    String inputLocaleString = queryItems.get("input[locale]");
    Locale inputLocale = MaryUtils.string2locale(inputLocaleString);
    if (inputLocale == null) {
        MaryHttpServerUtils.errorWrongQueryParameterValue(response, "input[locale]", inputLocaleString, null);
        return;
    }

    boolean isOutputText = true;
    boolean streamingAudio = false;
    AudioFileFormat.Type audioFileFormatType = null;
    if (outputType.name().contains("AUDIO")) {
        isOutputText = false;
        String outputFormatString = queryItems.get("output[format]");
        if (outputFormatString == null) {
            MaryHttpServerUtils.errorMissingQueryParameter(response,
                    "'output[format]' when output[type] = AUDIO");
            return;
        }
        if (outputFormatString.endsWith("_STREAM")) {
            streamingAudio = true;
        }
        int lastUnderscore = outputFormatString.lastIndexOf('_');
        if (lastUnderscore != -1) {
            outputFormatString = outputFormatString.substring(0, lastUnderscore);
        }
        try {
            audioFileFormatType = MaryAudioUtils.getAudioFileFormatType(outputFormatString);
        } catch (Exception ex) {
            // ignore: audioFileFormatType stays null and is rejected below
        }
        if (audioFileFormatType == null) {
            MaryHttpServerUtils.errorWrongQueryParameterValue(response, "output[format]", outputFormatString,
                    null);
            return;
        } else if (audioFileFormatType.toString().equals("MP3") && !MaryRuntimeUtils.canCreateMP3()) {
            MaryHttpServerUtils.errorWrongQueryParameterValue(response, "output[format]", outputFormatString,
                    "Conversion to MP3 not supported.");
            return;
        } else if (audioFileFormatType.toString().equals("Vorbis") && !MaryRuntimeUtils.canCreateOgg()) {
            MaryHttpServerUtils.errorWrongQueryParameterValue(response, "output[format]", outputFormatString,
                    "Conversion to OGG Vorbis format not supported.");
            return;
        }
    }

    Voice voice = null;
    String voiceGenderString = queryItems.get("voice[gender]");
    Voice.Gender voiceGender = null;
    if (voiceGenderString != null) {
        if (!(voiceGenderString.equals("male") || voiceGenderString.equals("female")
                || voiceGenderString.equals("neutral"))) {
            MaryHttpServerUtils.errorWrongQueryParameterValue(response, "voice[gender]", voiceGenderString,
                    null);
        }
        voiceGender = new Voice.Gender(voiceGenderString);
    }
    String voiceAgeString = queryItems.get("voice[age]");
    int voiceAge = -1;
    if (voiceAgeString != null) {
        voiceAge = Integer.parseInt(voiceAgeString);
        if (voiceAge < 0) {
            MaryHttpServerUtils.errorWrongQueryParameterValue(response, "voice[age]", voiceAgeString, null);
        }
    }
    String voiceVariantString = queryItems.get("voice[variant]");
    int voiceVariant = -1;
    if (voiceVariantString != null) {
        voiceVariant = Integer.parseInt(voiceVariantString);
        if (voiceVariant < 0) {
            MaryHttpServerUtils.errorWrongQueryParameterValue(response, "voice[variant]", voiceVariantString,
                    null);
        }
    }
    String utteranceStyle = queryItems.get("utterance[style]");
    if (utteranceStyle == null) {
        utteranceStyle = "";
    }

    String voiceName = queryItems.get("voice[name]");
    String[] voiceNameList = null;
    if (voiceName != null) {
        voiceNameList = voiceName.split(" ");
    }

    String utteranceEffects = queryItems.get("utterance[effects]");
    if (utteranceEffects == null) {
        utteranceEffects = "";
    }
    if (utteranceEffects.length() > 0)
        logger.debug("Audio effects requested: " + utteranceEffects);
    else
        logger.debug("No audio effects requested");

    // TODO(START,Parsing)

    // optionally, there may be output type parameters
    // (e.g., the list of features to produce for the output type TARGETFEATURES)
    String outputTypeParams = queryItems.get("OUTPUT_TYPE_PARAMS");

    String logMsg = queryItems.get("LOG");
    if (logMsg != null) {
        logger.info("Connection info: " + logMsg);
    }

    // TODO(END,Parsing)

    List<Voice> voiceResult = Voice.getVoiceWithSSMLAlgorythm(inputLocale, voiceGender, voiceNameList,
            voiceAge);
    if (voice == null) { // no voice tag -- use locale default if it exists.
        voice = Voice.getDefaultVoice(inputLocale);
        logger.debug("No voice requested -- using default " + voice);
    }
    if (voiceResult.isEmpty()) {
        MaryHttpServerUtils.errorWrongQueryParameterValue(response, "input[] and voice[]",
                "No suitable voice found for the requested configuration", null);
        return;
    }
    if (voiceVariant > 0) {
        voiceVariant--;
        if (voiceVariant >= voiceResult.size()) {
            voiceVariant = voiceResult.size() - 1;
        }
    } else {
        voiceVariant = 0;
    }
    voice = voiceResult.get(voiceVariant);
    inputLocale = voice.getLocale();

    String utteranceStyleEffects = "";
    if (fakeStylesByGender.containsKey(voice.gender().toString())) {
        HashMap<String, String> s = fakeStylesByGender.get(voice.gender().toString());
        if (s.containsKey(utteranceStyle)) {
            utteranceStyleEffects = s.get(utteranceStyle);
        }
    }
    HashMap<String, Object> effects_values = new HashMap<String, Object>();
    if (utteranceStyleEffects.length() > 0) {
        JSONArray effects = new JSONArray(utteranceStyleEffects);
        for (int i = 0; i < effects.length(); i++) {
            JSONObject obj = effects.getJSONObject(i);
            parseEffectsIntoHashMap(effectsRegistry, effects_values, obj);
            // System.out.println(toOldStyleEffectsString(registry, effects_values));
        }
        // System.out.println(toOldStyleEffectsString(registry, effects_values));
    }
    if (utteranceEffects.length() > 0) {
        JSONArray effects = new JSONArray(utteranceEffects);
        for (int i = 0; i < effects.length(); i++) {
            JSONObject obj = effects.getJSONObject(i);
            parseEffectsIntoHashMap(effectsRegistry, effects_values, obj);
            // System.out.println(toOldStyleEffectsString(registry, effects_values));
        }
        // System.out.println(toOldStyleEffectsString(registry, effects_values));
    }
    utteranceEffects = toOldStyleEffectsString(effectsRegistry, effects_values);
    if (utteranceEffects.length() > 0)
        logger.debug("Audio effects requested: " + utteranceEffects);
    else
        logger.debug("No audio effects requested");
    // Now, the parse is complete.

    // Construct audio file format -- even when output is not AUDIO,
    // in case we need to pass via audio to get our output type.
    if (audioFileFormatType == null) {
        audioFileFormatType = AudioFileFormat.Type.AU;
    }
    AudioFormat audioFormat;
    if (audioFileFormatType.toString().equals("MP3")) {
        audioFormat = MaryRuntimeUtils.getMP3AudioFormat();
    } else if (audioFileFormatType.toString().equals("Vorbis")) {
        audioFormat = MaryRuntimeUtils.getOggAudioFormat();
    } else if (voice != null) {
        audioFormat = voice.dbAudioFormat();
    } else {
        audioFormat = Voice.AF16000;
    }
    AudioFileFormat audioFileFormat = new AudioFileFormat(audioFileFormatType, audioFormat,
            AudioSystem.NOT_SPECIFIED);

    final Request maryRequest = new Request(inputType, outputType, inputLocale, voice, utteranceEffects,
            utteranceStyle, getId(), audioFileFormat, streamingAudio, outputTypeParams);

    // Process the request and send back the data
    boolean ok = true;
    try {
        maryRequest.setInputData(inputContent);
        logger.info("Read: " + inputContent);
    } catch (Exception e) {
        String message = "Problem reading input";
        logger.warn(message, e);
        MaryHttpServerUtils.errorInternalServerError(response, message, e);
        ok = false;
    }
    if (ok) {
        if (streamingAudio) {
            // Start two separate threads:
            // 1. one thread to process the request;
            new Thread("RH " + maryRequest.getId()) {
                public void run() {
                    Logger myLogger = MaryUtils.getLogger(this.getName());
                    try {
                        maryRequest.process();
                        myLogger.info("Streaming request processed successfully.");
                    } catch (Throwable t) {
                        myLogger.error("Processing failed.", t);
                    }
                }
            }.start();

            // 2. one thread to take the audio data as it becomes available
            //    and write it into the ProducingNHttpEntity.
            // The second one does not depend on the first one practically,
            // because the AppendableSequenceAudioInputStream returned by
            // maryRequest.getAudio() was already created in the constructor of Request.
            AudioInputStream audio = maryRequest.getAudio();
            assert audio != null : "Streaming audio but no audio stream -- very strange indeed! :-(";
            AudioFileFormat.Type audioType = maryRequest.getAudioFileFormat().getType();
            AudioStreamNHttpEntity entity = new AudioStreamNHttpEntity(maryRequest);
            new Thread(entity, "HTTPWriter " + maryRequest.getId()).start();
            // entity knows its contentType, no need to set explicitly here.
            response.setEntity(entity);
            response.setStatusCode(HttpStatus.SC_OK);
            return;
        } else { // not streaming audio
            // Process input data to output data
            try {
                maryRequest.process(); // this may take some time
            } catch (Throwable e) {
                String message = "Processing failed.";
                logger.error(message, e);
                MaryHttpServerUtils.errorInternalServerError(response, message, e);
                ok = false;
            }
            if (ok) {
                // Write output data to client
                try {
                    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
                    maryRequest.writeOutputData(outputStream);
                    String contentType;
                    if (maryRequest.getOutputType().isXMLType() || maryRequest.getOutputType().isTextType()) //text output
                        contentType = "text/plain; charset=UTF-8";
                    else //audio output
                        contentType = MaryHttpServerUtils
                                .getMimeType(maryRequest.getAudioFileFormat().getType());
                    MaryHttpServerUtils.toHttpResponse(outputStream.toByteArray(), response, contentType);
                } catch (Exception e) {
                    String message = "Cannot write output";
                    logger.warn(message, e);
                    MaryHttpServerUtils.errorInternalServerError(response, message, e);
                    ok = false;
                }
            }
        }
    }

    if (ok)
        logger.info("Request handled successfully.");
    else
        logger.info("Request couldn't be handled successfully.");
    if (MaryRuntimeUtils.lowMemoryCondition()) {
        logger.info("Low memory condition detected (only " + MaryUtils.availableMemory()
                + " bytes left). Triggering garbage collection.");
        Runtime.getRuntime().gc();
        logger.info("After garbage collection: " + MaryUtils.availableMemory() + " bytes available.");
    }
}