Example usage for org.apache.commons.math3.geometry.euclidean.threed Vector3D getX

List of usage examples for org.apache.commons.math3.geometry.euclidean.threed Vector3D getX

Introduction

On this page you can find example usage for org.apache.commons.math3.geometry.euclidean.threed Vector3D getX.

Prototype

public double getX() 

Document

Get the abscissa of the vector.
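
A minimal sketch of the call, assuming only that commons-math3 is on the classpath (Vector3D and getX come from the library; the demo class itself is hypothetical):

import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;

public class Vector3DGetXDemo {
    public static void main(String[] args) {
        // Build a vector from its Cartesian coordinates (x, y, z).
        Vector3D v = new Vector3D(3.0, 4.0, 5.0);
        // getX() returns the abscissa, i.e. the first Cartesian coordinate.
        System.out.println(v.getX()); // prints 3.0
    }
}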

Usage

From source file:org.jtrfp.trcl.TriangleList.java

private void setupVertex(int vIndex, int gpuTVIndex, int triangleIndex, TextureDescription td)
        throws ExecutionException, InterruptedException {
    final int numFrames = getPrimitives().length;
    final Triangle t = triangleAt(0, triangleIndex);
    final Vector3D pos = t.getVertices()[vIndex].getPosition();
    final TriangleVertexWindow vw = (TriangleVertexWindow) getMemoryWindow();
    ////////////////////// V E R T E X //////////////////////////////
    if (numFrames == 1) {
        vw.x.set(gpuTVIndex, (short) applyScale(pos.getX()));
        vw.y.set(gpuTVIndex, (short) applyScale(pos.getY()));
        vw.z.set(gpuTVIndex, (short) applyScale(pos.getZ()));
        final Vector3D normal = t.getVertices()[vIndex].getNormal();
        vw.normX.set(gpuTVIndex, (byte) (normal.getX() * 127));
        vw.normY.set(gpuTVIndex, (byte) (normal.getY() * 127));
        vw.normZ.set(gpuTVIndex, (byte) (normal.getZ() * 127));
    } else {
        float[] xFrames = new float[numFrames];
        float[] yFrames = new float[numFrames];
        float[] zFrames = new float[numFrames];
        float[] nxFrames = new float[numFrames];
        float[] nyFrames = new float[numFrames];
        float[] nzFrames = new float[numFrames];
        for (int i = 0; i < numFrames; i++) {
            xFrames[i] = (float) applyScale(
                    triangleAt(i, triangleIndex).getVertices()[vIndex].getPosition().getX());
        }
        xyzAnimator.addFrames(xFrames);

        for (int i = 0; i < numFrames; i++) {
            yFrames[i] = (float) applyScale(
                    triangleAt(i, triangleIndex).getVertices()[vIndex].getPosition().getY());
        }
        xyzAnimator.addFrames(yFrames);

        for (int i = 0; i < numFrames; i++) {
            zFrames[i] = (float) applyScale(
                    triangleAt(i, triangleIndex).getVertices()[vIndex].getPosition().getZ());
        }
        xyzAnimator.addFrames(zFrames);

        for (int i = 0; i < numFrames; i++) {
            nxFrames[i] = (float) Math
                    .rint(triangleAt(i, triangleIndex).getVertices()[vIndex].getNormal().getX() * 127);
        }
        xyzAnimator.addFrames(nxFrames);

        for (int i = 0; i < numFrames; i++) {
            nyFrames[i] = (float) Math
                    .rint(triangleAt(i, triangleIndex).getVertices()[vIndex].getNormal().getY() * 127);
        }
        xyzAnimator.addFrames(nyFrames);

        for (int i = 0; i < numFrames; i++) {
            nzFrames[i] = (float) Math
                    .rint(triangleAt(i, triangleIndex).getVertices()[vIndex].getNormal().getZ() * 127);
        }
        xyzAnimator.addFrames(nzFrames);
    } //end else(frames!=1)
      //////////////// T E X T U R E ///////////////////////////
    if (td == null) {
        System.err.println("Stack trace of triangle creation below. NullPointerException follows.");
        for (StackTraceElement el : t.getCreationStackTrace()) {
            System.err.println("\tat " + el.getClassName() + "." + el.getMethodName() + "(" + el.getFileName()
                    + ":" + el.getLineNumber() + ")");
        } //end for(stackTrace)
        throw new NullPointerException("Texture for triangle in " + debugName + " intolerably null.");
    }
    if (td instanceof Texture) {// Static texture
        final int sideScalar = ((Texture) td).getSideLength() - 1;
        if (animateUV && numFrames > 1) {// Animated UV
            float[] uFrames = new float[numFrames];
            float[] vFrames = new float[numFrames];
            final WindowAnimator uvAnimator = new WindowAnimator(getFlatTVWindow(), 2, // UV per vertex
                    numFrames, false, getVertexSequencer(timeBetweenFramesMsec, numFrames),
                    new UVXferFunc(gpuTVIndex * UVXferFunc.BACK_STRIDE_LEN));
            getModel().addTickableAnimator(uvAnimator);
            uvAnimator.setDebugName(debugName + ".uvAnimator");
            for (int i = 0; i < numFrames; i++) {
                uFrames[i] = (float) Math.rint(sideScalar * triangleAt(i, triangleIndex).getUV(vIndex).getX());
                vFrames[i] = (float) Math
                        .rint(sideScalar * (1 - triangleAt(i, triangleIndex).getUV(vIndex).getY()));
            } // end for(numFrames)
            uvAnimator.addFrames(uFrames);
            uvAnimator.addFrames(vFrames);
        } else {// end if(animateUV)
            vw.u.set(gpuTVIndex, (short) Math.rint(sideScalar * t.getUV(vIndex).getX()));
            vw.v.set(gpuTVIndex, (short) Math.rint(sideScalar * (1 - t.getUV(vIndex).getY())));
        } // end if(!animateUV)
        final int textureID = ((Texture) td).getTexturePage();
        vw.textureIDLo.set(gpuTVIndex, (byte) (textureID & 0xFF));
        vw.textureIDMid.set(gpuTVIndex, (byte) ((textureID >> 8) & 0xFF));
        vw.textureIDHi.set(gpuTVIndex, (byte) ((textureID >> 16) & 0xFF));
    } // end if(Texture)
    if (td instanceof AnimatedTexture) {//Animated texture
        final AnimatedTexture at = (AnimatedTexture) td;
        if (animateUV && numFrames > 1) {// Animated UV
            float[] uFrames = new float[numFrames];
            float[] vFrames = new float[numFrames];
            final WindowAnimator uvAnimator = new WindowAnimator(getFlatTVWindow(), 2, // UV per vertex
                    numFrames, false, getVertexSequencer(timeBetweenFramesMsec, numFrames),
                    new UVXferFunc(gpuTVIndex * UVXferFunc.BACK_STRIDE_LEN));
            getModel().addTickableAnimator(uvAnimator);
            for (int i = 0; i < numFrames; i++) {
                final int sideScalar = at.getFrames()[i].getSideLength() - 1;
                uFrames[i] = (float) Math.rint(sideScalar * triangleAt(i, triangleIndex).getUV(vIndex).getX());
                vFrames[i] = (float) Math
                        .rint(sideScalar * (1 - triangleAt(i, triangleIndex).getUV(vIndex).getY()));
            } // end for(numFrames)
            uvAnimator.addFrames(uFrames);
            uvAnimator.addFrames(vFrames);
        } else {// end if(animateUV)
            final int sideScalar = at.getFrames()[0].getSideLength() - 1;
            vw.u.set(gpuTVIndex, (short) Math.rint(sideScalar * t.getUV(vIndex).getX()));
            vw.v.set(gpuTVIndex, (short) Math.rint(sideScalar * (1 - t.getUV(vIndex).getY())));
        } // end if(!animateUV)
        final TexturePageAnimator texturePageAnimator = new TexturePageAnimator(at, vw, gpuTVIndex);
        texturePageAnimator.setDebugName(debugName + ".texturePageAnimator");
        getModel().addTickableAnimator(texturePageAnimator);
    } //end if(animated texture)
}

From source file:org.jtrfp.trcl.TriangleList.java

public Vector3D getMaximumVertexDims() {
    if (isPrimitivesFinalized())
        return cachedMaximumVertexDims;
    Vector3D result = Vector3D.ZERO;
    Triangle[][] t = getPrimitives();
    for (Triangle[] frame : t) {
        for (Triangle tri : frame) {
            for (int i = 0; i < 3; i++) {
                double v;
                final Vector3D pos = tri.getVertices()[i].getPosition();
                v = pos.getX();
                result = result.getX() < v ? new Vector3D(v, result.getY(), result.getZ()) : result;
                v = pos.getY();
                result = result.getY() < v ? new Vector3D(result.getX(), v, result.getZ()) : result;
                v = pos.getZ();
                result = result.getZ() < v ? new Vector3D(result.getX(), result.getY(), v) : result;
            } // end for(vertex)
        } // end for(triangle)
    } // end for(triangles)
    return result;
}

From source file:org.jtrfp.trcl.TriangleList.java

public Vector3D getMinimumVertexDims() {
    if (isPrimitivesFinalized())
        return cachedMinimumVertexDims;
    Vector3D result = new Vector3D(Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY,
            Double.POSITIVE_INFINITY);
    Triangle[][] t = getPrimitives();
    for (Triangle[] frame : t) {
        for (Triangle tri : frame) {
            for (int i = 0; i < 3; i++) {
                double v;
                final Vector3D pos = tri.getVertices()[i].getPosition();
                v = pos.getX();
                result = result.getX() > v ? new Vector3D(v, result.getY(), result.getZ()) : result;
                v = pos.getY();
                result = result.getY() > v ? new Vector3D(result.getX(), v, result.getZ()) : result;
                v = pos.getZ();
                result = result.getZ() > v ? new Vector3D(result.getX(), result.getY(), v) : result;
            } // end for(vertex)
        } // end for(triangle)
    } // end for(triangles)
    return result;
}

From source file:org.jtrfp.trcl.TriangleList.java

public double getMaximumVertexValue() {
    if (isPrimitivesFinalized())
        return cachedMaximumVertexValue;
    double result = 0;
    Triangle[][] t = getPrimitives();
    for (Triangle[] frame : t) {
        for (Triangle tri : frame) {
            for (int i = 0; i < 3; i++) {
                double v;
                final Vector3D pos = tri.getVertices()[i].getPosition();
                v = Math.abs(pos.getX());
                result = result < v ? v : result;
                v = Math.abs(pos.getY());
                result = result < v ? v : result;
                v = Math.abs(pos.getZ());
                result = result < v ? v : result;
            } // end for(vertex)
        } // end for(triangle)
    } // end for(triangles)
    return result;
}

From source file:org.jtrfp.trcl.Tunnel.java

private Vector3D buildTunnel(TDFFile.Tunnel _tun, Vector3D groundVector, boolean entrance)
        throws IllegalAccessException, UnrecognizedFormatException, FileNotFoundException, FileLoadException,
        IOException {
    // Entrance uses only a stub. Player is warped to TUNNEL_POS facing
    // TUNNEL_START_DIRECTION
    ResourceManager rm = tr.getResourceManager();
    LVLFile tlvl = rm.getLVL(_tun.getTunnelLVLFile());
    final ColorPaletteVectorList tunnelColorPalette = new ColorPaletteVectorList(
            tr.getResourceManager().getPalette(lvl.getGlobalPaletteFile()));
    TextureDescription[] tunnelTexturePalette = rm.getTextures(tlvl.getLevelTextureListFile(), paletteVL,
            ESTuTvPalette, true);
    TNLFile tun = tr.getResourceManager().getTNLData(tlvl.getHeightMapOrTunnelFile());

    final double segLen = 65536;
    final double bendiness = 18;
    List<Segment> segs = tun.getSegments();
    final LoadingProgressReporter[] reporters = tunnelAssemblyReporter.generateSubReporters(segs.size());
    // Vector3D tunnelEnd = TUNNEL_START_POS;
    Rotation rotation = entrance ? new Rotation(new Vector3D(0, 0, 1), groundVector)
            : new Rotation(new Vector3D(0, 0, 1), new Vector3D(1, 0, 0));
    Vector3D startPoint = TUNNEL_START_POS;

    Vector3D segPos = Vector3D.ZERO;

    final Vector3D top = rotation.applyTo(new Vector3D(0, 1, 0));
    /*
    if (entrance) {
        // Entrance is just a stub so we only need a few of the segments
        List<Segment> newSegs = new ArrayList<Segment>();
        for (int i = 0; i < 10; i++) {
       newSegs.add(segs.get(i));
        }
        segs = newSegs;
    }*/
    // CONSTRUCT AND INSTALL SEGMENTS
    int segIndex = 0;
    Vector3D finalPos = TUNNEL_START_POS;
    for (Segment s : segs) {
        reporters[segIndex].complete();
        tr.getReporter().report(
                "org.jtrfp.trcl.Tunnel." + _tun.getTunnelLVLFile() + ".segment" + (segIndex++) + "",
                s.getObstacle().name());
        // Figure out the space the segment will take
        Vector3D positionDelta = new Vector3D((double) (s.getEndX() - s.getStartX()) * bendiness * -1,
                (double) (s.getEndY() - s.getStartY()) * bendiness, segLen);
        // Create the segment
        Vector3D position = startPoint.add(rotation.applyTo(segPos));
        TunnelSegment ts = new TunnelSegment(tr, s, tunnelTexturePalette, segLen, positionDelta.getX(),
                positionDelta.getY());
        ts.setPosition(position.toArray());
        ts.setHeading(entrance ? groundVector : Vector3D.PLUS_I);
        ts.setTop(entrance ? top : Vector3D.PLUS_J);
        // Install the segment
        add(ts);
        installObstacles(s, tunnelColorPalette, ESTuTvPalette, tunnelTexturePalette,
                entrance ? groundVector : Vector3D.PLUS_I, entrance ? top : Vector3D.PLUS_J, position,
                TR.legacy2Modern(s.getStartWidth() * TunnelSegment.TUNNEL_DIA_SCALAR),
                TR.legacy2Modern(s.getStartWidth() * TunnelSegment.TUNNEL_DIA_SCALAR), tr);
        // Move origin to next segment
        segPos = segPos.add(positionDelta);
        finalPos = position;
    } // end for(segments)
    return finalPos;
}

From source file:org.micromanager.asidispim.AcquisitionPanel.java

/**
 * Actually runs the acquisition; does the dirty work of setting
 * up the controller, the circular buffer, starting the cameras,
 * grabbing the images and putting them into the acquisition, etc.
 * @param testAcq true if running test acquisition only (see runTestAcquisition() javadoc)
 * @param testAcqSide only applies to test acquisition, passthrough from runTestAcquisition() 
 * @return true if ran without any fatal errors.
 */
private boolean runAcquisitionPrivate(boolean testAcq, Devices.Sides testAcqSide) {

    // sanity check: this should only be called when no acquisition is running
    if (gui_.isAcquisitionRunning()) {
        MyDialogUtils.showError("An acquisition is already running");
        return false;
    }

    if (ASIdiSPIM.getFrame().getHardwareInUse()) {
        MyDialogUtils.showError("Hardware is being used by something else (maybe autofocus?)");
        return false;
    }

    boolean liveModeOriginally = gui_.isLiveModeOn();
    if (liveModeOriginally) {
        gui_.enableLiveMode(false);
    }

    // make sure slice timings are up to date
    // do this automatically; we used to prompt user if they were out of date
    // do this before getting snapshot of sliceTiming_ in acqSettings
    recalculateSliceTiming(!minSlicePeriodCB_.isSelected());

    if (!sliceTiming_.valid) {
        MyDialogUtils.showError("Error in calculating the slice timing; is the camera mode set correctly?");
        return false;
    }

    AcquisitionSettings acqSettingsOrig = getCurrentAcquisitionSettings();

    if (acqSettingsOrig.cameraMode == CameraModes.Keys.LIGHT_SHEET && core_.getPixelSizeUm() < 1e-6) { // can't compare floating-point values for equality directly, so treat anything < 1e-6 as zero or negative
        ReportingUtils.showError("Need to configure pixel size in Micro-Manager to use light sheet mode.");
        return false;
    }

    // if a test acquisition then only run a single timepoint, no autofocus
    // allow multi-positions for test acquisition for now, though perhaps this is not desirable
    if (testAcq) {
        acqSettingsOrig.useTimepoints = false;
        acqSettingsOrig.numTimepoints = 1;
        acqSettingsOrig.useAutofocus = false;
        acqSettingsOrig.separateTimepoints = false;

        // if called from the setup panels then the side will be specified
        //   so we can do an appropriate single-sided acquisition
        // if called from the acquisition panel then NONE will be specified
        //   and run according to existing settings
        if (testAcqSide != Devices.Sides.NONE) {
            acqSettingsOrig.numSides = 1;
            acqSettingsOrig.firstSideIsA = (testAcqSide == Devices.Sides.A);
        }

        // work around limitation of not being able to use PLogic per-volume switching with single side
        // => do per-volume switching instead (only difference should be extra time to switch)
        if (acqSettingsOrig.useChannels && acqSettingsOrig.channelMode == MultichannelModes.Keys.VOLUME_HW
                && acqSettingsOrig.numSides < 2) {
            acqSettingsOrig.channelMode = MultichannelModes.Keys.VOLUME;
        }

    }

    double volumeDuration = computeActualVolumeDuration(acqSettingsOrig);
    double timepointDuration = computeTimepointDuration();
    long timepointIntervalMs = Math.round(acqSettingsOrig.timepointInterval * 1000);

    // use hardware timing if < 1 second between timepoints
    // experimentally need ~0.5 sec to set up acquisition, this gives a bit of cushion
    // cannot do this in getCurrentAcquisitionSettings because of mutually recursive
    // call with computeActualVolumeDuration()
    if (acqSettingsOrig.numTimepoints > 1 && timepointIntervalMs < (timepointDuration + 750)
            && !acqSettingsOrig.isStageScanning) {
        acqSettingsOrig.hardwareTimepoints = true;
    }

    if (acqSettingsOrig.useMultiPositions) {
        if (acqSettingsOrig.hardwareTimepoints
                || ((acqSettingsOrig.numTimepoints > 1) && (timepointIntervalMs < timepointDuration * 1.2))) {
            // change to not hardwareTimepoints and warn user
            // but allow acquisition to continue
            acqSettingsOrig.hardwareTimepoints = false;
            MyDialogUtils.showError("Timepoint interval may not be sufficient "
                    + "depending on actual time required to change positions. " + "Proceed at your own risk.");
        }
    }

    // now acqSettings should be read-only
    final AcquisitionSettings acqSettings = acqSettingsOrig;

    // generate string for log file
    Gson gson = new GsonBuilder().setPrettyPrinting().create();
    final String acqSettingsJSON = gson.toJson(acqSettings);

    // get MM device names for first/second cameras to acquire
    String firstCamera, secondCamera;
    Devices.Keys firstCameraKey, secondCameraKey;
    boolean firstSideA = acqSettings.firstSideIsA;
    if (firstSideA) {
        firstCamera = devices_.getMMDevice(Devices.Keys.CAMERAA);
        firstCameraKey = Devices.Keys.CAMERAA;
        secondCamera = devices_.getMMDevice(Devices.Keys.CAMERAB);
        secondCameraKey = Devices.Keys.CAMERAB;
    } else {
        firstCamera = devices_.getMMDevice(Devices.Keys.CAMERAB);
        firstCameraKey = Devices.Keys.CAMERAB;
        secondCamera = devices_.getMMDevice(Devices.Keys.CAMERAA);
        secondCameraKey = Devices.Keys.CAMERAA;
    }

    boolean sideActiveA, sideActiveB;
    final boolean twoSided = acqSettings.numSides > 1;
    if (twoSided) {
        sideActiveA = true;
        sideActiveB = true;
    } else {
        secondCamera = null;
        if (firstSideA) {
            sideActiveA = true;
            sideActiveB = false;
        } else {
            sideActiveA = false;
            sideActiveB = true;
        }
    }

    final boolean acqBothCameras = acqSettings.acquireBothCamerasSimultaneously;
    boolean camActiveA = sideActiveA || acqBothCameras;
    boolean camActiveB = sideActiveB || acqBothCameras;

    if (camActiveA) {
        if (!devices_.isValidMMDevice(Devices.Keys.CAMERAA)) {
            MyDialogUtils.showError("Using side A but no camera specified for that side.");
            return false;
        }
        Devices.Keys camKey = Devices.Keys.CAMERAA;
        Devices.Libraries camLib = devices_.getMMDeviceLibrary(camKey);
        if (!CameraModes.getValidModeKeys(camLib).contains(getSPIMCameraMode())) {
            MyDialogUtils.showError("Camera trigger mode set to " + getSPIMCameraMode().toString()
                    + " but camera A doesn't support it.");
            return false;
        }
        // Hamamatsu doesn't support light sheet mode with USB cameras.  It seems that due to the static architecture of getValidModeKeys
        //   there is no good way to tell earlier that light sheet mode isn't supported.  I don't like this but don't see another option.
        if (camLib == Devices.Libraries.HAMCAM && props_.getPropValueString(camKey, Properties.Keys.CAMERA_BUS)
                .equals(Properties.Values.USB3)) {
            if (getSPIMCameraMode() == CameraModes.Keys.LIGHT_SHEET) {
                MyDialogUtils.showError("Hamamatsu only supports light sheet mode with CameraLink readout.");
                return false;
            }
        }
    }

    if (sideActiveA) {
        if (!devices_.isValidMMDevice(Devices.Keys.GALVOA)) {
            MyDialogUtils.showError("Using side A but no scanner specified for that side.");
            return false;
        }
        if (requiresPiezos(acqSettings.spimMode) && !devices_.isValidMMDevice(Devices.Keys.PIEZOA)) {
            MyDialogUtils.showError(
                    "Using side A and acquisition mode requires piezos but no piezo specified for that side.");
            return false;
        }
    }

    if (camActiveB) {
        if (!devices_.isValidMMDevice(Devices.Keys.CAMERAB)) {
            MyDialogUtils.showError("Using side B but no camera specified for that side.");
            return false;
        }
        if (!CameraModes.getValidModeKeys(devices_.getMMDeviceLibrary(Devices.Keys.CAMERAB))
                .contains(getSPIMCameraMode())) {
            MyDialogUtils.showError("Camera trigger mode set to " + getSPIMCameraMode().toString()
                    + " but camera B doesn't support it.");
            return false;
        }
    }

    if (sideActiveB) {
        if (!devices_.isValidMMDevice(Devices.Keys.GALVOB)) {
            MyDialogUtils.showError("Using side B but no scanner specified for that side.");
            return false;
        }
        if (requiresPiezos(acqSettings.spimMode) && !devices_.isValidMMDevice(Devices.Keys.PIEZOB)) {
            MyDialogUtils.showError(
                    "Using side B and acquisition mode requires piezos but no piezo specified for that side.");
            return false;
        }
    }

    boolean usingDemoCam = (devices_.getMMDeviceLibrary(Devices.Keys.CAMERAA).equals(Devices.Libraries.DEMOCAM)
            && camActiveA)
            || (devices_.getMMDeviceLibrary(Devices.Keys.CAMERAB).equals(Devices.Libraries.DEMOCAM)
                    && camActiveB);

    // set up channels
    int nrChannelsSoftware = acqSettings.numChannels; // how many times we trigger the controller per stack
    int nrSlicesSoftware = acqSettings.numSlices;
    String originalChannelConfig = "";
    boolean changeChannelPerVolumeSoftware = false;
    if (acqSettings.useChannels) {
        if (acqSettings.numChannels < 1) {
            MyDialogUtils.showError("\"Channels\" is checked, but no channels are selected");
            return false;
        }
        // get current channel so that we can restore it, then set channel appropriately
        originalChannelConfig = multiChannelPanel_.getCurrentConfig();
        switch (acqSettings.channelMode) {
        case VOLUME:
            changeChannelPerVolumeSoftware = true;
            multiChannelPanel_.initializeChannelCycle();
            break;
        case VOLUME_HW:
        case SLICE_HW:
            if (acqSettings.numChannels == 1) { // only 1 channel selected so don't have to really use hardware switching
                multiChannelPanel_.initializeChannelCycle();
                multiChannelPanel_.selectNextChannel();
            } else { // we have at least 2 channels
                boolean success = controller_.setupHardwareChannelSwitching(acqSettings);
                if (!success) {
                    MyDialogUtils.showError("Couldn't set up slice hardware channel switching.");
                    return false;
                }
                nrChannelsSoftware = 1;
                nrSlicesSoftware = acqSettings.numSlices * acqSettings.numChannels;
            }
            break;
        default:
            MyDialogUtils
                    .showError("Unsupported multichannel mode \"" + acqSettings.channelMode.toString() + "\"");
            return false;
        }
    }
    if (twoSided && acqBothCameras) {
        nrSlicesSoftware *= 2;
    }

    if (acqSettings.hardwareTimepoints) {
        // in hardwareTimepoints case we trigger controller once for all timepoints => need to
        //   adjust number of frames we expect back from the camera during MM's SequenceAcquisition
        if (acqSettings.cameraMode == CameraModes.Keys.OVERLAP) {
            // For overlap mode we send one extra trigger per channel per side for volume switching (both PLogic and not)
            // This holds for all multi-channel modes, just the order in which the extra trigger comes varies
            // Very last trigger won't ever return a frame so subtract 1.
            nrSlicesSoftware = ((acqSettings.numSlices + 1) * acqSettings.numChannels
                    * acqSettings.numTimepoints);
            if (twoSided && acqBothCameras) {
                nrSlicesSoftware *= 2;
            }
            nrSlicesSoftware -= 1;
        } else {
            // we get back one image per trigger for all trigger modes other than OVERLAP
            //   and we have already computed how many images that is (nrSlicesSoftware)
            nrSlicesSoftware *= acqSettings.numTimepoints;
            if (twoSided && acqBothCameras) {
                nrSlicesSoftware *= 2;
            }
        }
    }

    // set up XY positions
    int nrPositions = 1;
    PositionList positionList = new PositionList();
    if (acqSettings.useMultiPositions) {
        try {
            positionList = gui_.getPositionList();
            nrPositions = positionList.getNumberOfPositions();
        } catch (MMScriptException ex) {
            MyDialogUtils.showError(ex, "Error getting position list for multiple XY positions");
        }
        if (nrPositions < 1) {
            MyDialogUtils.showError("\"Positions\" is checked, but no positions are in position list");
            return false;
        }
    }

    // make sure we have cameras selected
    if (!checkCamerasAssigned(true)) {
        return false;
    }

    final float cameraReadoutTime = computeCameraReadoutTime();
    final double exposureTime = acqSettings.sliceTiming.cameraExposure;

    final boolean save = saveCB_.isSelected() && !testAcq;
    final String rootDir = rootField_.getText();

    // make sure we have a valid directory to save in
    final File dir = new File(rootDir);
    if (save) {
        try {
            if (!dir.exists()) {
                if (!dir.mkdir()) {
                    throw new Exception();
                }
            }
        } catch (Exception ex) {
            MyDialogUtils.showError("Could not create directory for saving acquisition data.");
            return false;
        }
    }

    if (acqSettings.separateTimepoints) {
        // because separate timepoints closes windows when done, force the user to save data to disk to avoid confusion
        if (!save) {
            MyDialogUtils.showError("For separate timepoints, \"Save while acquiring\" must be enabled.");
            return false;
        }
        // for separate timepoints, make sure the directory is empty to make sure naming pattern is "clean"
        // this is an arbitrary choice to avoid confusion later on when looking at file names
        if (dir.list().length > 0) {
            MyDialogUtils.showError("For separate timepoints the saving directory must be empty.");
            return false;
        }
    }

    int nrFrames; // how many Micro-manager "frames" = time points to take
    if (acqSettings.separateTimepoints) {
        nrFrames = 1;
        nrRepeats_ = acqSettings.numTimepoints;
    } else {
        nrFrames = acqSettings.numTimepoints;
        nrRepeats_ = 1;
    }

    AcquisitionModes.Keys spimMode = acqSettings.spimMode;

    boolean autoShutter = core_.getAutoShutter();
    boolean shutterOpen = false; // will read later
    String originalCamera = core_.getCameraDevice();

    // more sanity checks
    // TODO move these checks earlier, before we set up channels and XY positions

    // make sure stage scan is supported if selected
    if (acqSettings.isStageScanning) {
        if (!devices_.isTigerDevice(Devices.Keys.XYSTAGE)
                || !props_.hasProperty(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_NUMLINES)) {
            MyDialogUtils.showError("Must have stage with scan-enabled firmware for stage scanning.");
            return false;
        }
        if (acqSettings.spimMode == AcquisitionModes.Keys.STAGE_SCAN_INTERLEAVED && acqSettings.numSides < 2) {
            MyDialogUtils.showError("Interleaved mode requires two sides.");
            return false;
        }
    }

    double sliceDuration = acqSettings.sliceTiming.sliceDuration;
    if (exposureTime + cameraReadoutTime > sliceDuration) {
        // should only be possible to mess this up using advanced timing settings
        // or if there are errors in our own calculations
        MyDialogUtils.showError("Exposure time of " + exposureTime
                + " is longer than time needed for a line scan with" + " readout time of " + cameraReadoutTime
                + "\n" + "This will result in dropped frames. " + "Please change input");
        return false;
    }

    // if we want to do hardware timepoints make sure there's not a problem
    // lots of different situations where hardware timepoints can't be used...
    if (acqSettings.hardwareTimepoints) {
        if (acqSettings.useChannels && acqSettings.channelMode == MultichannelModes.Keys.VOLUME_HW) {
            // both hardware time points and volume channel switching use SPIMNumRepeats property
            // TODO this seems a severe limitation, maybe this could be changed in the future via firmware change
            MyDialogUtils.showError("Cannot use hardware time points (small time point interval)"
                    + " with hardware channel switching volume-by-volume.");
            return false;
        }
        if (acqSettings.isStageScanning) {
            // stage scanning needs to be triggered for each time point
            MyDialogUtils.showError(
                    "Cannot use hardware time points (small time point interval)" + " with stage scanning.");
            return false;
        }
        if (acqSettings.separateTimepoints) {
            MyDialogUtils.showError("Cannot use hardware time points (small time point interval)"
                    + " with separate viewers/file for each time point.");
            return false;
        }
        if (acqSettings.useAutofocus) {
            MyDialogUtils.showError("Cannot use hardware time points (small time point interval)"
                    + " with autofocus during acquisition.");
            return false;
        }
        if (acqSettings.useMovementCorrection) {
            MyDialogUtils.showError("Cannot use hardware time points (small time point interval)"
                    + " with movement correction during acquisition.");
            return false;
        }
        if (acqSettings.useChannels && acqSettings.channelMode == MultichannelModes.Keys.VOLUME) {
            MyDialogUtils.showError("Cannot use hardware time points (small time point interval)"
                    + " with software channels (need to use PLogic channel switching).");
            return false;
        }
        if (spimMode == AcquisitionModes.Keys.NO_SCAN) {
            MyDialogUtils.showError("Cannot do hardware time points when no scan mode is used."
                    + " Use the number of slices to set the number of images to acquire.");
            return false;
        }
    }

    if (acqSettings.useChannels && acqSettings.channelMode == MultichannelModes.Keys.VOLUME_HW
            && acqSettings.numSides < 2) {
        MyDialogUtils.showError("Cannot do PLogic channel switching of volume when only one"
                + " side is selected. Pester the developers if you need this.");
        return false;
    }

    // make sure we aren't trying to collect timepoints faster than we can
    if (!acqSettings.useMultiPositions && acqSettings.numTimepoints > 1) {
        if (timepointIntervalMs < volumeDuration) {
            MyDialogUtils
                    .showError("Time point interval shorter than" + " the time to collect a single volume.\n");
            return false;
        }
    }

    // Autofocus settings; only used if acqSettings.useAutofocus is true
    boolean autofocusAtT0 = false;
    int autofocusEachNFrames = 10;
    String autofocusChannel = "";
    if (acqSettings.useAutofocus) {
        autofocusAtT0 = prefs_.getBoolean(MyStrings.PanelNames.AUTOFOCUS.toString(),
                Properties.Keys.PLUGIN_AUTOFOCUS_ACQBEFORESTART, false);
        autofocusEachNFrames = props_.getPropValueInteger(Devices.Keys.PLUGIN,
                Properties.Keys.PLUGIN_AUTOFOCUS_EACHNIMAGES);
        autofocusChannel = props_.getPropValueString(Devices.Keys.PLUGIN,
                Properties.Keys.PLUGIN_AUTOFOCUS_CHANNEL);
        // double-check that selected channel is valid if we are doing multi-channel
        if (acqSettings.useChannels) {
            String channelGroup = props_.getPropValueString(Devices.Keys.PLUGIN,
                    Properties.Keys.PLUGIN_MULTICHANNEL_GROUP);
            StrVector channels = gui_.getMMCore().getAvailableConfigs(channelGroup);
            boolean found = false;
            for (String channel : channels) {
                if (channel.equals(autofocusChannel)) {
                    found = true;
                    break;
                }
            }
            if (!found) {
                MyDialogUtils.showError("Invalid autofocus channel selected on autofocus tab.");
                return false;
            }
        }
    }

    // Movement Correction settings; only used if acqSettings.useMovementCorrection is true
    int correctMovementEachNFrames = 10;
    String correctMovementChannel = "";
    int cmChannelNumber = -1;
    if (acqSettings.useMovementCorrection) {
        correctMovementEachNFrames = props_.getPropValueInteger(Devices.Keys.PLUGIN,
                Properties.Keys.PLUGIN_AUTOFOCUS_CORRECTMOVEMENT_EACHNIMAGES);
        correctMovementChannel = props_.getPropValueString(Devices.Keys.PLUGIN,
                Properties.Keys.PLUGIN_AUTOFOCUS_CORRECTMOVEMENT_CHANNEL);
        // double-check that selected channel is valid if we are doing multi-channel
        if (acqSettings.useChannels) {
            String channelGroup = props_.getPropValueString(Devices.Keys.PLUGIN,
                    Properties.Keys.PLUGIN_MULTICHANNEL_GROUP);
            StrVector channels = gui_.getMMCore().getAvailableConfigs(channelGroup);
            boolean found = false;
            for (String channel : channels) {
                if (channel.equals(correctMovementChannel)) {
                    found = true;
                    break;
                }
            }
            if (!found) {
                MyDialogUtils.showError("Invalid movement correction channel selected on autofocus tab.");
                return false;
            }
        }

    }

    // the circular buffer, which is used by both cameras, can only have one image size setting
    //    => require same image height and width for both cameras if both are used 
    if (twoSided || acqBothCameras) {
        try {
            Rectangle roi_1 = core_.getROI(firstCamera);
            Rectangle roi_2 = core_.getROI(secondCamera);
            if (roi_1.width != roi_2.width || roi_1.height != roi_2.height) {
                MyDialogUtils.showError(
                        "Two cameras' ROI height and width must be equal because of Micro-Manager's circular buffer");
                return false;
            }
        } catch (Exception ex) {
            MyDialogUtils.showError(ex, "Problem getting camera ROIs");
        }
    }

    cameras_.setCameraForAcquisition(firstCameraKey, true);
    if (twoSided || acqBothCameras) {
        cameras_.setCameraForAcquisition(secondCameraKey, true);
    }

    // save exposure time, will restore at end of acquisition
    try {
        prefs_.putFloat(MyStrings.PanelNames.SETTINGS.toString(),
                Properties.Keys.PLUGIN_CAMERA_LIVE_EXPOSURE_FIRST.toString(),
                (float) core_.getExposure(devices_.getMMDevice(firstCameraKey)));
        if (twoSided || acqBothCameras) {
            prefs_.putFloat(MyStrings.PanelNames.SETTINGS.toString(),
                    Properties.Keys.PLUGIN_CAMERA_LIVE_EXPOSURE_SECOND.toString(),
                    (float) core_.getExposure(devices_.getMMDevice(secondCameraKey)));
        }
    } catch (Exception ex) {
        MyDialogUtils.showError(ex, "could not cache exposure");
    }

    try {
        core_.setExposure(firstCamera, exposureTime);
        if (twoSided || acqBothCameras) {
            core_.setExposure(secondCamera, exposureTime);
        }
        gui_.refreshGUIFromCache();
    } catch (Exception ex) {
        MyDialogUtils.showError(ex, "could not set exposure");
    }

    // seems to have a problem if the core's camera has been set to some other
    // camera before we start doing things, so set to a SPIM camera
    try {
        core_.setCameraDevice(firstCamera);
    } catch (Exception ex) {
        MyDialogUtils.showError(ex, "could not set camera");
    }

    // empty out circular buffer
    try {
        core_.clearCircularBuffer();
    } catch (Exception ex) {
        MyDialogUtils.showError(ex, "Error emptying out the circular buffer");
        return false;
    }

    // stop the serial traffic for position updates during acquisition
    // if we return from this function (including aborting) we need to unpause
    posUpdater_.pauseUpdates(true);

    // initialize stage scanning so we can restore state
    Point2D.Double xyPosUm = new Point2D.Double();
    float origXSpeed = 1f; // don't want 0 in case something goes wrong
    float origXAccel = 1f; // don't want 0 in case something goes wrong
    if (acqSettings.isStageScanning) {
        try {
            xyPosUm = core_.getXYStagePosition(devices_.getMMDevice(Devices.Keys.XYSTAGE));
            origXSpeed = props_.getPropValueFloat(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_SPEED);
            origXAccel = props_.getPropValueFloat(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_ACCEL);
        } catch (Exception ex) {
            MyDialogUtils.showError(
                    "Could not get XY stage position, speed, or acceleration for stage scan initialization");
            posUpdater_.pauseUpdates(false);
            return false;
        }

        // if X speed is less than 0.2 mm/s then it probably wasn't restored to correct speed some other time
        // we offer to set it to a more normal speed in that case, until the user declines and we stop asking
        if (origXSpeed < 0.2 && resetXaxisSpeed_) {
            resetXaxisSpeed_ = MyDialogUtils.getConfirmDialogResult(
                    "Max speed of X axis is small, perhaps it was not correctly restored after stage scanning previously.  Do you want to set it to 1 mm/s now?",
                    JOptionPane.YES_NO_OPTION);
            // once the user selects "no" then resetXaxisSpeed_ will be false and stay false until plugin is launched again
            if (resetXaxisSpeed_) {
                props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_SPEED, 1f);
                origXSpeed = 1f;
            }
        }
    }

    numTimePointsDone_ = 0;

    // force saving as image stacks, not individual files
    // implementation assumes just two options, either 
    //  TaggedImageStorageDiskDefault.class or TaggedImageStorageMultipageTiff.class
    boolean separateImageFilesOriginally = ImageUtils.getImageStorageClass()
            .equals(TaggedImageStorageDiskDefault.class);
    ImageUtils.setImageStorageClass(TaggedImageStorageMultipageTiff.class);

    // Set up controller SPIM parameters (including from Setup panel settings)
    // want to do this, even with demo cameras, so we can test everything else
    if (!controller_.prepareControllerForAquisition(acqSettings)) {
        posUpdater_.pauseUpdates(false);
        return false;
    }

    boolean nonfatalError = false;
    long acqButtonStart = System.currentTimeMillis();
    String acqName = "";
    acq_ = null;

    // do not want to return from within this loop => throw exception instead
    // loop is executed once per acquisition (i.e. once if separate viewers isn't selected
    //   or once per timepoint if separate viewers is selected)
    long repeatStart = System.currentTimeMillis();
    for (int acqNum = 0; !cancelAcquisition_.get() && acqNum < nrRepeats_; acqNum++) {
        // handle intervals between (software-timed) repeats
        // only applies when doing separate viewers for each timepoint
        // and have multiple timepoints
        long repeatNow = System.currentTimeMillis();
        long repeatdelay = repeatStart + acqNum * timepointIntervalMs - repeatNow;
        while (repeatdelay > 0 && !cancelAcquisition_.get()) {
            updateAcquisitionStatus(AcquisitionStatus.WAITING, (int) (repeatdelay / 1000));
            long sleepTime = Math.min(1000, repeatdelay);
            try {
                Thread.sleep(sleepTime);
            } catch (InterruptedException e) {
                ReportingUtils.showError(e);
            }
            repeatNow = System.currentTimeMillis();
            repeatdelay = repeatStart + acqNum * timepointIntervalMs - repeatNow;
        }

        BlockingQueue<TaggedImage> bq = new LinkedBlockingQueue<TaggedImage>(10);

        // try to close last acquisition viewer if there could be one open (only in single acquisition per timepoint mode)
        if (acqSettings.separateTimepoints && (acq_ != null) && !cancelAcquisition_.get()) {
            try {
                // following line needed due to some arcane internal reason, otherwise
                //   call to closeAcquisitionWindow() fails silently. 
                //   See http://sourceforge.net/p/micro-manager/mailman/message/32999320/
                acq_.promptToSave(false);
                gui_.closeAcquisitionWindow(acqName);
            } catch (Exception ex) {
                // do nothing if unsuccessful
            }
        }

        if (acqSettings.separateTimepoints) {
            // call to getUniqueAcquisitionName is extra safety net, we have checked that directory is empty before starting
            acqName = gui_.getUniqueAcquisitionName(prefixField_.getText() + "_" + acqNum);
        } else {
            acqName = gui_.getUniqueAcquisitionName(prefixField_.getText());
        }

        long extraStageScanTimeout = 0;
        if (acqSettings.isStageScanning) {
            // approximately compute the extra time to wait for stack to begin (ramp up time)
            //   by getting the volume duration and subtracting the acquisition duration and then dividing by two
            extraStageScanTimeout = (long) Math.ceil(computeActualVolumeDuration(acqSettings)
                    - (acqSettings.numSlices * acqSettings.numChannels * acqSettings.sliceTiming.sliceDuration))
                    / 2;
        }

        long extraMultiXYTimeout = 0;
        if (acqSettings.useMultiPositions) {
            // give 20 extra seconds to arrive at intended XY position instead of trying to get fancy about computing actual move time
            extraMultiXYTimeout = XYSTAGETIMEOUT;
            // furthermore make sure that the main timeout value is at least 20 seconds because MM's position list uses this (via MultiStagePosition.goToPosition)
            if (props_.getPropValueInteger(Devices.Keys.CORE,
                    Properties.Keys.CORE_TIMEOUT_MS) < XYSTAGETIMEOUT) {
                props_.setPropValue(Devices.Keys.CORE, Properties.Keys.CORE_TIMEOUT_MS, XYSTAGETIMEOUT);
            }
        }

        VirtualAcquisitionDisplay vad = null;
        WindowListener wl_acq = null;
        WindowListener[] wls_orig = null;
        try {
            // check for stop button before each acquisition
            if (cancelAcquisition_.get()) {
                throw new IllegalMonitorStateException("User stopped the acquisition");
            }

            // flag that we are actually running acquisition now
            acquisitionRunning_.set(true);

            ReportingUtils.logMessage("diSPIM plugin starting acquisition " + acqName
                    + " with following settings: " + acqSettingsJSON);

            final int numMMChannels = acqSettings.numSides * acqSettings.numChannels * (acqBothCameras ? 2 : 1);

            if (spimMode == AcquisitionModes.Keys.NO_SCAN && !acqSettings.separateTimepoints) {
                // swap nrFrames and numSlices
                gui_.openAcquisition(acqName, rootDir, acqSettings.numSlices, numMMChannels, nrFrames,
                        nrPositions, true, save);
            } else {
                gui_.openAcquisition(acqName, rootDir, nrFrames, numMMChannels, acqSettings.numSlices,
                        nrPositions, true, save);
            }

            channelNames_ = new String[numMMChannels];

            // generate channel names and colors
            // also builds viewString for MultiViewRegistration metadata
            String viewString = "";
            final String SEPARATOR = "_";
            for (int reflect = 0; reflect < 2; reflect++) {
                // only run for loop once unless acqBothCameras is true
                // if acqBothCameras is true then run second time to add "epi" channels
                if (reflect > 0 && !acqBothCameras) {
                    continue;
                }
                // set up channels (side A/B is treated as channel too)
                if (acqSettings.useChannels) {
                    ChannelSpec[] channels = multiChannelPanel_.getUsedChannels();
                    for (int i = 0; i < channels.length; i++) {
                        String chName = "-" + channels[i].config_ + (reflect > 0 ? "-epi" : "");
                        // same algorithm for channel index vs. specified channel and side as in comments of code below
                        //   that figures out the channel where to file each incoming image
                        int channelIndex = i;
                        if (twoSided) {
                            channelIndex *= 2;
                        }
                        channelIndex += reflect * numMMChannels / 2;
                        channelNames_[channelIndex] = firstCamera + chName;
                        viewString += NumberUtils.intToDisplayString(0) + SEPARATOR;
                        if (twoSided) {
                            channelNames_[channelIndex + 1] = secondCamera + chName;
                            viewString += NumberUtils.intToDisplayString(90) + SEPARATOR;
                        }
                    }
                } else { // single-channel
                    int channelIndex = reflect * numMMChannels / 2;
                    channelNames_[channelIndex] = firstCamera + (reflect > 0 ? "-epi" : "");
                    viewString += NumberUtils.intToDisplayString(0) + SEPARATOR;
                    if (twoSided) {
                        channelNames_[channelIndex + 1] = secondCamera + (reflect > 0 ? "-epi" : "");
                        viewString += NumberUtils.intToDisplayString(90) + SEPARATOR;
                    }
                }
            }
            // strip last separator of viewString (for Multiview Reconstruction)
            viewString = viewString.substring(0, viewString.length() - 1);

            // assign channel names and colors
            for (int i = 0; i < numMMChannels; i++) {
                gui_.setChannelName(acqName, i, channelNames_[i]);
                gui_.setChannelColor(acqName, i, getChannelColor(i));
            }

            if (acqSettings.useMovementCorrection) {
                for (int i = 0; i < acqSettings.numChannels; i++) {
                    if (channelNames_[i].equals(firstCamera + "-" + correctMovementChannel)) {
                        cmChannelNumber = i;
                    }
                }
                if (cmChannelNumber == -1) {
                    MyDialogUtils.showError(
                            "The channel selected for movement correction on the auitofocus tab was not found in this acquisition");
                    return false;
                }
            }

            zStepUm_ = acqSettings.isStageScanning ? controller_.getActualStepSizeUm() // computed step size, accounting for quantization of controller
                    : acqSettings.stepSizeUm; // should be same as PanelUtils.getSpinnerFloatValue(stepSize_)

            // initialize acquisition
            gui_.initializeAcquisition(acqName, (int) core_.getImageWidth(), (int) core_.getImageHeight(),
                    (int) core_.getBytesPerPixel(), (int) core_.getImageBitDepth());
            gui_.promptToSaveAcquisition(acqName, !testAcq);

            // These metadata have to be added after initialization, 
            // otherwise they will not be shown?!
            gui_.setAcquisitionProperty(acqName, "NumberOfSides",
                    NumberUtils.doubleToDisplayString(acqSettings.numSides));
            gui_.setAcquisitionProperty(acqName, "FirstSide", acqSettings.firstSideIsA ? "A" : "B");
            gui_.setAcquisitionProperty(acqName, "SlicePeriod_ms", actualSlicePeriodLabel_.getText());
            gui_.setAcquisitionProperty(acqName, "LaserExposure_ms",
                    NumberUtils.doubleToDisplayString(acqSettings.desiredLightExposure));
            gui_.setAcquisitionProperty(acqName, "VolumeDuration", actualVolumeDurationLabel_.getText());
            gui_.setAcquisitionProperty(acqName, "SPIMmode", spimMode.toString());
            // Multi-page TIFF saving code wants this one (cameras are all 16-bits, so not much reason for anything else)
            gui_.setAcquisitionProperty(acqName, "PixelType", "GRAY16");
            gui_.setAcquisitionProperty(acqName, "UseAutofocus",
                    acqSettings.useAutofocus ? Boolean.TRUE.toString() : Boolean.FALSE.toString());
            gui_.setAcquisitionProperty(acqName, "UseMotionCorrection",
                    acqSettings.useMovementCorrection ? Boolean.TRUE.toString() : Boolean.FALSE.toString());
            gui_.setAcquisitionProperty(acqName, "HardwareTimepoints",
                    acqSettings.hardwareTimepoints ? Boolean.TRUE.toString() : Boolean.FALSE.toString());
            gui_.setAcquisitionProperty(acqName, "SeparateTimepoints",
                    acqSettings.separateTimepoints ? Boolean.TRUE.toString() : Boolean.FALSE.toString());
            gui_.setAcquisitionProperty(acqName, "CameraMode", acqSettings.cameraMode.toString());
            gui_.setAcquisitionProperty(acqName, "z-step_um", NumberUtils.doubleToDisplayString(zStepUm_));
            // Properties for use by MultiViewRegistration plugin
            // Format is: x_y_z, set to 1 if we should rotate around this axis.
            gui_.setAcquisitionProperty(acqName, "MVRotationAxis", "0_1_0");
            gui_.setAcquisitionProperty(acqName, "MVRotations", viewString);
            // save XY and SPIM head position in metadata
            // update positions first at expense of two extra serial transactions
            refreshXYZPositions();
            gui_.setAcquisitionProperty(acqName, "Position_X",
                    positions_.getPositionString(Devices.Keys.XYSTAGE, Directions.X));
            gui_.setAcquisitionProperty(acqName, "Position_Y",
                    positions_.getPositionString(Devices.Keys.XYSTAGE, Directions.Y));
            gui_.setAcquisitionProperty(acqName, "Position_SPIM_Head",
                    positions_.getPositionString(Devices.Keys.UPPERZDRIVE));
            gui_.setAcquisitionProperty(acqName, "SPIMAcqSettings", acqSettingsJSON);
            gui_.setAcquisitionProperty(acqName, "SPIMtype", ASIdiSPIM.oSPIM ? "oSPIM" : "diSPIM");
            gui_.setAcquisitionProperty(acqName, "AcquisitionName", acqName);
            gui_.setAcquisitionProperty(acqName, "Prefix", acqName);

            // get circular buffer ready
            // do once here but not per-trigger; need to ensure ROI changes registered
            core_.initializeCircularBuffer(); // superset of clearCircularBuffer()

            // TODO: use new acquisition interface that goes through the pipeline
            //gui_.setAcquisitionAddImageAsynchronous(acqName); 
            acq_ = gui_.getAcquisition(acqName);

            // Dive into MM internals since script interface does not support pipelines
            ImageCache imageCache = acq_.getImageCache();
            vad = acq_.getAcquisitionWindow();
            imageCache.addImageCacheListener(vad);

            // Start pumping images into the ImageCache
            DefaultTaggedImageSink sink = new DefaultTaggedImageSink(bq, imageCache);
            sink.start();

            // remove usual window listener(s) and replace it with our own
            //   that will prompt before closing and cancel acquisition if confirmed
            // this should be considered a hack, it may not work perfectly
            // I have confirmed that there is only one windowListener and it seems to 
            //   also be related to window closing
            // Note that ImageJ's acquisition window is AWT instead of Swing
            wls_orig = vad.getImagePlus().getWindow().getWindowListeners();
            for (WindowListener l : wls_orig) {
                vad.getImagePlus().getWindow().removeWindowListener(l);
            }
            wl_acq = new WindowAdapter() {
                @Override
                public void windowClosing(WindowEvent arg0) {
                    // if running acquisition only close if user confirms
                    if (acquisitionRunning_.get()) {
                        boolean stop = MyDialogUtils.getConfirmDialogResult(
                                "Do you really want to abort the acquisition?", JOptionPane.YES_NO_OPTION);
                        if (stop) {
                            cancelAcquisition_.set(true);
                        }
                    }
                }
            };
            vad.getImagePlus().getWindow().addWindowListener(wl_acq);

            // patterned after implementation in MMStudio.java
            // will be null if not saving to disk
            lastAcquisitionPath_ = acq_.getImageCache().getDiskLocation();
            lastAcquisitionName_ = acqName;

            // only used when motion correction was requested
            MovementDetector[] movementDetectors = new MovementDetector[nrPositions];

            // Transformation matrices to convert between camera and stage coordinates
            final Vector3D yAxis = new Vector3D(0.0, 1.0, 0.0);
            final Rotation camARotation = new Rotation(yAxis, Math.toRadians(-45));
            final Rotation camBRotation = new Rotation(yAxis, Math.toRadians(45));

            final Vector3D zeroPoint = new Vector3D(0.0, 0.0, 0.0); // cache a zero point for efficiency

            // make sure all devices have arrived, e.g. a stage isn't still moving
            try {
                core_.waitForSystem();
            } catch (Exception e) {
                ReportingUtils.logError("error waiting for system");
            }

            // Loop over all the times we trigger the controller's acquisition
            //  (although if multi-channel with volume switching is selected there
            //   is inner loop to trigger once per channel)
            // remember acquisition start time for software-timed timepoints
            // For hardware-timed timepoints we only trigger the controller once
            long acqStart = System.currentTimeMillis();
            for (int trigNum = 0; trigNum < nrFrames; trigNum++) {
                // handle intervals between (software-timed) time points
                // when we are within the same acquisition
                // (if separate viewer is selected then nothing bad happens here
                // but waiting during interval handled elsewhere)
                long acqNow = System.currentTimeMillis();
                long delay = acqStart + trigNum * timepointIntervalMs - acqNow;
                while (delay > 0 && !cancelAcquisition_.get()) {
                    updateAcquisitionStatus(AcquisitionStatus.WAITING, (int) (delay / 1000));
                    long sleepTime = Math.min(1000, delay);
                    Thread.sleep(sleepTime);
                    acqNow = System.currentTimeMillis();
                    delay = acqStart + trigNum * timepointIntervalMs - acqNow;
                }

                // check for stop button before each time point
                if (cancelAcquisition_.get()) {
                    throw new IllegalMonitorStateException("User stopped the acquisition");
                }

                int timePoint = acqSettings.separateTimepoints ? acqNum : trigNum;

                // this is where we autofocus if requested
                if (acqSettings.useAutofocus) {
                    // Note that we will not autofocus as expected when using hardware
                    // timing.  This seems OK, since hardware timing results in short
                    // acquisition times that do not need autofocus.  We have already
                    // ensured that we aren't doing both.
                    if ((autofocusAtT0 && timePoint == 0)
                            || ((timePoint > 0) && (timePoint % autofocusEachNFrames == 0))) {
                        if (acqSettings.useChannels) {
                            multiChannelPanel_.selectChannel(autofocusChannel);
                        }
                        if (sideActiveA) {
                            AutofocusUtils.FocusResult score = autofocus_.runFocus(this, Devices.Sides.A, false,
                                    sliceTiming_, false);
                            updateCalibrationOffset(Devices.Sides.A, score);
                        }
                        if (sideActiveB) {
                            AutofocusUtils.FocusResult score = autofocus_.runFocus(this, Devices.Sides.B, false,
                                    sliceTiming_, false);
                            updateCalibrationOffset(Devices.Sides.B, score);
                        }
                        // Restore settings of the controller
                        controller_.prepareControllerForAquisition(acqSettings);
                        if (acqSettings.useChannels
                                && acqSettings.channelMode != MultichannelModes.Keys.VOLUME) {
                            controller_.setupHardwareChannelSwitching(acqSettings);
                        }
                        // make sure circular buffer is cleared
                        core_.clearCircularBuffer();
                    }
                }

                numTimePointsDone_++;
                updateAcquisitionStatus(AcquisitionStatus.ACQUIRING);

                // loop over all positions
                for (int positionNum = 0; positionNum < nrPositions; positionNum++) {
                    if (acqSettings.useMultiPositions) {

                        // make sure user didn't stop things
                        if (cancelAcquisition_.get()) {
                            throw new IllegalMonitorStateException("User stopped the acquisition");
                        }

                        // we want the stage to move fast between positions, so we
                        //   clobber the stage scanning settings here and restore them below
                        float scanXSpeed = 1f;
                        float scanXAccel = 1f;
                        if (acqSettings.isStageScanning) {
                            scanXSpeed = props_.getPropValueFloat(Devices.Keys.XYSTAGE,
                                    Properties.Keys.STAGESCAN_MOTOR_SPEED);
                            props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_SPEED,
                                    origXSpeed);
                            scanXAccel = props_.getPropValueFloat(Devices.Keys.XYSTAGE,
                                    Properties.Keys.STAGESCAN_MOTOR_ACCEL);
                            props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_ACCEL,
                                    origXAccel);
                        }

                        final MultiStagePosition nextPosition = positionList.getPosition(positionNum);

                        // blocking call; will wait for stages to move
                        MultiStagePosition.goToPosition(nextPosition, core_);

                        // for stage scanning: restore speed and set up scan at new position 
                        // non-multi-position situation is handled in prepareControllerForAquisition instead
                        if (acqSettings.isStageScanning) {
                            props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_SPEED,
                                    scanXSpeed);
                            props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_ACCEL,
                                    scanXAccel);
                            StagePosition pos = nextPosition.get(devices_.getMMDevice(Devices.Keys.XYSTAGE)); // get ideal position from position list, not current position
                            controller_.prepareStageScanForAcquisition(pos.x, pos.y);
                        }

                        refreshXYZPositions();

                        // wait any extra time the user requests
                        Thread.sleep(Math.round(PanelUtils.getSpinnerFloatValue(positionDelay_)));
                    }

                    // loop over all the times we trigger the controller
                    // usually just once, but will be the number of channels if we have
                    //  multiple channels and aren't using PLogic to change between them
                    for (int channelNum = 0; channelNum < nrChannelsSoftware; channelNum++) {
                        try {
                            // flag that we are using the cameras/controller
                            ASIdiSPIM.getFrame().setHardwareInUse(true);

                            // deal with shutter before starting acquisition
                            shutterOpen = core_.getShutterOpen();
                            if (autoShutter) {
                                core_.setAutoShutter(false);
                                if (!shutterOpen) {
                                    core_.setShutterOpen(true);
                                }
                            }

                            // start the cameras
                            core_.startSequenceAcquisition(firstCamera, nrSlicesSoftware, 0, true);
                            if (twoSided || acqBothCameras) {
                                core_.startSequenceAcquisition(secondCamera, nrSlicesSoftware, 0, true);
                            }

                            // deal with channel if needed (hardware channel switching doesn't happen here)
                            if (changeChannelPerVolumeSoftware) {
                                multiChannelPanel_.selectNextChannel();
                            }

                            // special case: single-sided piezo acquisition risks the illumination piezo going to sleep
                            // prevent this by sending a relative move of 0 before each trigger, like we do in live mode
                            // NB: this won't help for hardware-timed timepoints
                            final Devices.Keys piezoIllumKey = firstSideA ? Devices.Keys.PIEZOB
                                    : Devices.Keys.PIEZOA;
                            if (!twoSided && props_.getPropValueInteger(piezoIllumKey,
                                    Properties.Keys.AUTO_SLEEP_DELAY) > 0) {
                                core_.setRelativePosition(devices_.getMMDevice(piezoIllumKey), 0);
                            }

                            // trigger the state machine on the controller
                            // do this even with demo cameras to test everything else
                            boolean success = controller_.triggerControllerStartAcquisition(spimMode,
                                    firstSideA);
                            if (!success) {
                                throw new Exception("Controller triggering not successful");
                            }

                            ReportingUtils.logDebugMessage("Starting time point " + (timePoint + 1) + " of "
                                    + nrFrames + " with (software) channel number " + channelNum);

                            // Wait for first image to create ImageWindow, so that we can be sure about image size
                            // Do not actually grab first image here, just make sure it is there
                            long start = System.currentTimeMillis();
                            long now = start;
                            final long timeout = Math.max(3000,
                                    Math.round(10 * sliceDuration + 2 * acqSettings.delayBeforeSide))
                                    + extraStageScanTimeout + extraMultiXYTimeout;
                            while (core_.getRemainingImageCount() == 0 && (now - start < timeout)
                                    && !cancelAcquisition_.get()) {
                                now = System.currentTimeMillis();
                                Thread.sleep(5);
                            }
                            if (now - start >= timeout) {
                                String msg = "Camera did not send first image within a reasonable time.\n";
                                if (acqSettings.isStageScanning) {
                                    msg += "Make sure jumpers are correct on XY card and also micro-micromirror card.";
                                } else {
                                    msg += "Make sure camera trigger cables are connected properly.";
                                }
                                throw new Exception(msg);
                            }

                            // grab all the images from the cameras, put them into the acquisition
                            int[] channelImageNr = new int[4 * acqSettings.numChannels]; // keep track of how many frames we have received for each MM "channel"
                            int[] cameraImageNr = new int[2]; // keep track of how many images we have received from the camera
                            int[] tpNumber = new int[2 * acqSettings.numChannels]; // keep track of which timepoint we are on for hardware timepoints
                            int imagesToSkip = 0; // hardware timepoints have to drop spurious images with overlap mode
                            final boolean checkForSkips = acqSettings.hardwareTimepoints
                                    && (acqSettings.cameraMode == CameraModes.Keys.OVERLAP);
                            boolean done = false;
                            long timeout2 = Math.max(1000, Math.round(5 * sliceDuration));
                            if (acqSettings.isStageScanning) { // for stage scanning have to allow extra time for turn-around
                                timeout2 += (2 * (long) Math.ceil(getStageRampDuration(acqSettings))); // ramp up and then down
                                timeout2 += 5000; // ample extra time for turn-around (e.g. antibacklash move in Y); interestingly 500ms extra seems insufficient for reasons not yet understood, so just pad this for now  // TODO figure out why turn-around is taking so long
                                if (acqSettings.spimMode == AcquisitionModes.Keys.STAGE_SCAN_UNIDIRECTIONAL) {
                                    timeout2 += (long) Math.ceil(getStageRetraceDuration(acqSettings)); // in unidirectional case also need to rewind
                                }
                            }
                            start = System.currentTimeMillis();
                            long last = start;
                            try {
                                while ((core_.getRemainingImageCount() > 0
                                        || core_.isSequenceRunning(firstCamera) || ((twoSided || acqBothCameras)
                                                && core_.isSequenceRunning(secondCamera)))
                                        && !done) {
                                    now = System.currentTimeMillis();
                                    if (core_.getRemainingImageCount() > 0) { // we have an image to grab
                                        TaggedImage timg = core_.popNextTaggedImage();

                                        if (checkForSkips && imagesToSkip != 0) {
                                            imagesToSkip--;
                                            continue; // goes to next iteration of this loop without doing anything else
                                        }

                                        // figure out which channel index this frame belongs to
                                        // "channel index" is channel of MM acquisition
                                        // channel indexes will go from 0 to (numSides * numChannels - 1) for standard (non-reflective) imaging
                                        // if double-sided then second camera gets odd channel indexes (1, 3, etc.)
                                        //    and adjacent pairs will be same color (e.g. 0 and 1 will be from first color, 2 and 3 from second, etc.)
                                        // if acquisition from both cameras (reflective imaging) then
                                        //    second half of channel indices are from opposite (epi) view
                                        // e.g. for 3-color 1-sided (A first) standard (non-reflective) then
                                        //    0 will be A-illum A-cam 1st color
                                        //    2 will be A-illum A-cam 2nd color
                                        //    4 will be A-illum A-cam 3rd color
                                        // e.g. for 3-color 2-sided (A first) standard (non-reflective) then
                                        //    0 will be A-illum A-cam 1st color
                                        //    1 will be B-illum B-cam 1st color
                                        //    2 will be A-illum A-cam 2nd color
                                        //    3 will be B-illum B-cam 2nd color
                                        //    4 will be A-illum A-cam 3rd color
                                        //    5 will be B-illum B-cam 3rd color
                                        // e.g. for 3-color 1-sided (A first) both camera (reflective) then
                                        //    0 will be A-illum A-cam 1st color
                                        //    1 will be A-illum A-cam 2nd color
                                        //    2 will be A-illum A-cam 3rd color
                                        //    3 will be A-illum B-cam 1st color
                                        //    4 will be A-illum B-cam 2nd color
                                        //    5 will be A-illum B-cam 3rd color
                                        // e.g. for 3-color 2-sided (A first) both camera (reflective) then
                                        //    0 will be A-illum A-cam 1st color
                                        //    1 will be B-illum B-cam 1st color
                                        //    2 will be A-illum A-cam 2nd color
                                        //    3 will be B-illum B-cam 2nd color
                                        //    4 will be A-illum A-cam 3rd color
                                        //    5 will be B-illum B-cam 3rd color
                                        //    6 will be A-illum B-cam 1st color
                                        //    7 will be B-illum A-cam 1st color
                                        //    8 will be A-illum B-cam 2nd color
                                        //    9 will be B-illum A-cam 2nd color
                                        //   10 will be A-illum B-cam 3rd color
                                        //   11 will be B-illum A-cam 3rd color
                                        String camera = (String) timg.tags.get("Camera");
                                        int cameraIndex = camera.equals(firstCamera) ? 0 : 1;
                                        int channelIndex_tmp;
                                        switch (acqSettings.channelMode) {
                                        case NONE:
                                        case VOLUME:
                                            channelIndex_tmp = channelNum;
                                            break;
                                        case VOLUME_HW:
                                            channelIndex_tmp = cameraImageNr[cameraIndex]
                                                    / acqSettings.numSlices; // want quotient only
                                            break;
                                        case SLICE_HW:
                                            channelIndex_tmp = cameraImageNr[cameraIndex]
                                                    % acqSettings.numChannels; // want modulo arithmetic
                                            break;
                                        default:
                                            // should never get here
                                            throw new Exception("Undefined channel mode");
                                        }
                                        if (acqBothCameras) {
                                            if (twoSided) { // 2-sided, both cameras
                                                channelIndex_tmp = channelIndex_tmp * 2 + cameraIndex;
                                                // determine whether first or second side by whether we've seen half the images yet
                                                if (cameraImageNr[cameraIndex] > nrSlicesSoftware / 2) {
                                                    // second illumination side => second half of channels
                                                    channelIndex_tmp += 2 * acqSettings.numChannels;
                                                }
                                            } else { // 1-sided, both cameras
                                                channelIndex_tmp += cameraIndex * acqSettings.numChannels;
                                            }
                                        } else { // normal situation, non-reflective imaging
                                            if (twoSided) {
                                                channelIndex_tmp *= 2;
                                            }
                                            channelIndex_tmp += cameraIndex;
                                        }
                                        final int channelIndex = channelIndex_tmp;

                                        int actualTimePoint = timePoint;
                                        if (acqSettings.hardwareTimepoints) {
                                            actualTimePoint = tpNumber[channelIndex];
                                        }
                                        if (acqSettings.separateTimepoints) {
                                            // if we are doing separate timepoints then frame is always 0
                                            actualTimePoint = 0;
                                        }
                                        // note that hardwareTimepoints and separateTimepoints can never both be true

                                        // add image to acquisition
                                        if (spimMode == AcquisitionModes.Keys.NO_SCAN
                                                && !acqSettings.separateTimepoints) {
                                            // create time series for no scan
                                            addImageToAcquisition(acq_, channelImageNr[channelIndex],
                                                    channelIndex, actualTimePoint, positionNum, now - acqStart,
                                                    timg, bq);
                                        } else { // standard, create Z-stacks
                                            addImageToAcquisition(acq_, actualTimePoint, channelIndex,
                                                    channelImageNr[channelIndex], positionNum, now - acqStart,
                                                    timg, bq);
                                        }

                                        // update our counters to be ready for next image
                                        channelImageNr[channelIndex]++;
                                        cameraImageNr[cameraIndex]++;

                                        // if hardware timepoints then we only send one trigger and
                                        //   manually keep track of which channel/timepoint comes next
                                        if (acqSettings.hardwareTimepoints
                                                && channelImageNr[channelIndex] >= acqSettings.numSlices) { // only do this if we are done with the slices in this MM channel

                                            // we just finished filling one MM channel with all its slices so go to next timepoint for this channel
                                            channelImageNr[channelIndex] = 0;
                                            tpNumber[channelIndex]++;

                                            // see if we are supposed to skip next image
                                            if (checkForSkips) {
                                                // one extra image per MM channel; this includes the case of only 1 color (either multi-channel disabled or else only 1 channel selected)
                                                // if we are interleaving by slice then the next numChannels images will be from the extra slice position
                                                // in any other configuration we will just drop the next image
                                                if (acqSettings.useChannels
                                                        && acqSettings.channelMode == MultichannelModes.Keys.SLICE_HW) {
                                                    imagesToSkip = acqSettings.numChannels;
                                                } else {
                                                    imagesToSkip = 1;
                                                }
                                            }

                                            // update acquisition status message for hardware acquisition
                                            //   (for non-hardware acquisition message is updated elsewhere)
                                            //   Arbitrarily choose one possible channel to do this on.
                                            if (channelIndex == 0
                                                    && (numTimePointsDone_ < acqSettings.numTimepoints)) {
                                                numTimePointsDone_++;
                                                updateAcquisitionStatus(AcquisitionStatus.ACQUIRING);
                                            }
                                        }

                                        last = now; // keep track of last image timestamp

                                    } else { // no image ready yet
                                        done = cancelAcquisition_.get();
                                        Thread.sleep(1);
                                        if (now - last >= timeout2) {
                                            ReportingUtils
                                                    .logError("Camera did not send all expected images within"
                                                            + " a reasonable period for timepoint "
                                                            + numTimePointsDone_ + ".  Continuing anyway.");
                                            nonfatalError = true;
                                            done = true;
                                        }
                                    }
                                }

                                // update count if we stopped in the middle
                                if (cancelAcquisition_.get()) {
                                    numTimePointsDone_--;
                                }

                                // if we are using a demo camera then add some extra time to let the controller
                                // finish, since we got images without waiting for the controller to actually send triggers
                                if (usingDemoCam) {
                                    Thread.sleep(200); // for serial communication overhead
                                    Thread.sleep((long) volumeDuration / nrChannelsSoftware); // estimate the time per channel, not ideal in case of software channel switching
                                    if (acqSettings.isStageScanning) {
                                        Thread.sleep(1000 + extraStageScanTimeout); // extra 1 second plus ramp time for stage scanning 
                                    }
                                }

                            } catch (InterruptedException iex) {
                                MyDialogUtils.showError(iex);
                            }

                            if (acqSettings.hardwareTimepoints) {
                                break; // only trigger controller once
                            }

                        } catch (Exception ex) {
                            MyDialogUtils.showError(ex);
                        } finally {
                            // cleanup at the end of each time we trigger the controller

                            ASIdiSPIM.getFrame().setHardwareInUse(false);

                            // put shutter back to original state
                            core_.setShutterOpen(shutterOpen);
                            core_.setAutoShutter(autoShutter);

                            // make sure cameras aren't running anymore
                            if (core_.isSequenceRunning(firstCamera)) {
                                core_.stopSequenceAcquisition(firstCamera);
                            }
                            if ((twoSided || acqBothCameras) && core_.isSequenceRunning(secondCamera)) {
                                core_.stopSequenceAcquisition(secondCamera);
                            }

                            // make sure SPIM state machine on micromirror and SCAN of XY card are stopped (should normally be but sanity check)
                            if ((acqSettings.numSides > 1) || acqSettings.firstSideIsA) {
                                props_.setPropValue(Devices.Keys.GALVOA, Properties.Keys.SPIM_STATE,
                                        Properties.Values.SPIM_IDLE, true);
                            }
                            if ((acqSettings.numSides > 1) || !acqSettings.firstSideIsA) {
                                props_.setPropValue(Devices.Keys.GALVOB, Properties.Keys.SPIM_STATE,
                                        Properties.Values.SPIM_IDLE, true);
                            }
                            if (acqSettings.isStageScanning) {
                                props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_STATE,
                                        Properties.Values.SPIM_IDLE);
                            }
                        }
                    }

                    if (acqSettings.useMovementCorrection && (timePoint % correctMovementEachNFrames) == 0) {
                        if (movementDetectors[positionNum] == null) {
                            // Transform from camera space to stage space:
                            Rotation rotation = camBRotation;
                            if (firstSideA) {
                                rotation = camARotation;
                            }
                            movementDetectors[positionNum] = new MovementDetector(prefs_, acq_, cmChannelNumber,
                                    positionNum, rotation);
                        }

                        Vector3D movement = movementDetectors[positionNum]
                                .detectMovement(Method.PhaseCorrelation);

                        String msg1 = "TimePoint: " + timePoint + ", Detected movement.  X: " + movement.getX()
                                + ", Y: " + movement.getY() + ", Z: " + movement.getZ();
                        System.out.println(msg1);

                        if (!movement.equals(zeroPoint)) {
                            String msg = "ASIdiSPIM motion corrector moving stages: X: " + movement.getX()
                                    + ", Y: " + movement.getY() + ", Z: " + movement.getZ();
                            gui_.logMessage(msg);
                            System.out.println(msg);

                            // if we are using the position list, update the position in the list
                            if (acqSettings.useMultiPositions) {
                                MultiStagePosition position = positionList.getPosition(positionNum);
                                StagePosition pos = position.get(devices_.getMMDevice(Devices.Keys.XYSTAGE));
                                pos.x += movement.getX();
                                pos.y += movement.getY();
                                StagePosition zPos = position
                                        .get(devices_.getMMDevice(Devices.Keys.UPPERZDRIVE));
                                if (zPos != null) {
                                    zPos.x += movement.getZ();
                                }
                            } else {
                                // only a single position, move the stage now
                                core_.setRelativeXYPosition(devices_.getMMDevice(Devices.Keys.XYSTAGE),
                                        movement.getX(), movement.getY());
                                core_.setRelativePosition(devices_.getMMDevice(Devices.Keys.UPPERZDRIVE),
                                        movement.getZ());
                            }

                        }
                    }
                }
                if (acqSettings.hardwareTimepoints) {
                    break;
                }
            }
        } catch (IllegalMonitorStateException ex) {
            // do nothing, the acquisition was simply halted during its operation
            // will log error message during finally clause
        } catch (MMScriptException mex) {
            MyDialogUtils.showError(mex);
        } catch (Exception ex) {
            MyDialogUtils.showError(ex);
        } finally { // end of this acquisition (could be about to restart if separate viewers)
            try {
                // restore original window listeners
                try {
                    vad.getImagePlus().getWindow().removeWindowListener(wl_acq);
                    for (WindowListener l : wls_orig) {
                        vad.getImagePlus().getWindow().addWindowListener(l);
                    }
                } catch (Exception ex) {
                    // do nothing, window is probably gone
                }

                if (cancelAcquisition_.get()) {
                    ReportingUtils.logMessage("User stopped the acquisition");
                }

                bq.put(TaggedImageQueue.POISON);
                // TODO: evaluate closeAcquisition call
                // at the moment, the Micro-Manager API has a bug that causes
                // a closed acquisition to not really be closed, causing problems
                // when the user closes a window of the previous acquisition
                // changed r14705 (2014-11-24)
                // gui_.closeAcquisition(acqName);
                ReportingUtils.logMessage("diSPIM plugin acquisition " + acqName + " took: "
                        + (System.currentTimeMillis() - acqButtonStart) + "ms");

                //               while(gui_.isAcquisitionRunning()) {
                //                  Thread.sleep(10);
                //                  ReportingUtils.logMessage("waiting for acquisition to finish.");
                //               }

                // flag that we are done with acquisition
                acquisitionRunning_.set(false);

                // write acquisition settings if requested
                if (lastAcquisitionPath_ != null && prefs_.getBoolean(MyStrings.PanelNames.SETTINGS.toString(),
                        Properties.Keys.PLUGIN_WRITE_ACQ_SETTINGS_FILE, false)) {
                    String path = "";
                    try {
                        path = lastAcquisitionPath_ + File.separator + "AcqSettings.txt";
                        PrintWriter writer = new PrintWriter(path);
                        writer.println(acqSettingsJSON);
                        writer.flush();
                        writer.close();
                    } catch (Exception ex) {
                        MyDialogUtils.showError(ex,
                                "Could not save acquisition settings to file as requested to path " + path);
                    }
                }

            } catch (Exception ex) {
                // exception while stopping sequence acquisition, not sure what to do...
                MyDialogUtils.showError(ex, "Problem while finishing acquisition");
            }
        }

    } // for loop over acquisitions

    // cleanup after end of all acquisitions

    // TODO be more careful and always do these if we actually started the acquisition,
    //   even if an exception happened

    cameras_.setCameraForAcquisition(firstCameraKey, false);
    if (twoSided || acqBothCameras) {
        cameras_.setCameraForAcquisition(secondCameraKey, false);
    }

    // restore exposure times of SPIM cameras
    try {
        core_.setExposure(firstCamera, prefs_.getFloat(MyStrings.PanelNames.SETTINGS.toString(),
                Properties.Keys.PLUGIN_CAMERA_LIVE_EXPOSURE_FIRST.toString(), 10f));
        if (twoSided || acqBothCameras) {
            core_.setExposure(secondCamera, prefs_.getFloat(MyStrings.PanelNames.SETTINGS.toString(),
                    Properties.Keys.PLUGIN_CAMERA_LIVE_EXPOSURE_SECOND.toString(), 10f));
        }
        gui_.refreshGUIFromCache();
    } catch (Exception ex) {
        MyDialogUtils.showError("Could not restore exposure after acquisition");
    }

    // reset channel to original if we clobbered it
    if (acqSettings.useChannels) {
        multiChannelPanel_.setConfig(originalChannelConfig);
    }

    // clean up controller settings after acquisition
    // want to do this, even with demo cameras, so we can test everything else
    // TODO figure out if we really want to return piezos to 0 position (maybe center position,
    //   maybe not at all since we move when we switch to setup tab, something else??)
    controller_.cleanUpControllerAfterAcquisition(acqSettings.numSides, acqSettings.firstSideIsA, true);

    // if we did stage scanning restore its position and speed
    if (acqSettings.isStageScanning) {
        try {
            // make sure stage scanning state machine is stopped, otherwise setting speed/position won't take
            props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_STATE,
                    Properties.Values.SPIM_IDLE);
            props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_SPEED, origXSpeed);
            props_.setPropValue(Devices.Keys.XYSTAGE, Properties.Keys.STAGESCAN_MOTOR_ACCEL, origXAccel);
            core_.setXYPosition(devices_.getMMDevice(Devices.Keys.XYSTAGE), xyPosUm.x, xyPosUm.y);
        } catch (Exception ex) {
            MyDialogUtils.showError("Could not restore XY stage position after acquisition");
        }
    }

    updateAcquisitionStatus(AcquisitionStatus.DONE);
    posUpdater_.pauseUpdates(false);
    if (testAcq && prefs_.getBoolean(MyStrings.PanelNames.SETTINGS.toString(),
            Properties.Keys.PLUGIN_TESTACQ_SAVE, false)) {
        String path = "";
        try {
            path = prefs_.getString(MyStrings.PanelNames.SETTINGS.toString(),
                    Properties.Keys.PLUGIN_TESTACQ_PATH, "");
            IJ.saveAs(acq_.getAcquisitionWindow().getImagePlus(), "raw", path);
            // TODO consider generating a short metadata file to assist in interpretation
        } catch (Exception ex) {
            MyDialogUtils.showError("Could not save raw data from test acquisition to path " + path);
        }
    }

    if (separateImageFilesOriginally) {
        ImageUtils.setImageStorageClass(TaggedImageStorageDiskDefault.class);
    }

    // restore camera
    try {
        core_.setCameraDevice(originalCamera);
    } catch (Exception ex) {
        MyDialogUtils.showError("Could not restore camera after acquisition");
    }

    if (liveModeOriginally) {
        gui_.enableLiveMode(true);
    }

    if (nonfatalError) {
        MyDialogUtils.showError("Missed some images during acquisition, see core log for details");
    }

    return true;
}
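
The motion-correction block near the end of the method above is where Vector3D.getX() actually appears: the detected movement vector is unpacked component-wise and applied to the XY stage and the Z drive separately. Below is a minimal, self-contained sketch of that unpacking; the stage-motion methods here are hypothetical stand-ins, not the plugin's API:

import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;

public class MovementUnpackSketch {
    // hypothetical stand-ins for the XY-stage and Z-drive calls used in the plugin
    static void moveXYRelative(double dx, double dy) {
        System.out.printf("XY += (%f, %f)%n", dx, dy);
    }

    static void moveZRelative(double dz) {
        System.out.printf("Z += %f%n", dz);
    }

    public static void main(String[] args) {
        Vector3D movement = new Vector3D(1.5, -0.5, 2.0); // detected drift, in stage coordinates
        if (!movement.equals(Vector3D.ZERO)) {
            moveXYRelative(movement.getX(), movement.getY()); // lateral correction
            moveZRelative(movement.getZ());                   // axial correction
        }
    }
}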

From source file:org.micromanager.plugins.magellan.propsandcovariants.LaserPredNet.java

private static boolean isWithinSurace(SurfaceInterpolator surface, Vector3D point) throws InterruptedException {
    boolean defined = surface.waitForCurentInterpolation().isInterpDefined(point.getX(), point.getY());
    if (!defined) {
        return false;
    }
    float interpVal = surface.waitForCurentInterpolation().getInterpolatedValue(point.getX(), point.getY());
    return point.getZ() > interpVal;
}
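
The XY components of the point select the surface sample and the Z component is compared against the interpolated value. Here is a self-contained sketch of the same test, with a hypothetical surfaceZAt(x, y) lookup standing in for Magellan's SurfaceInterpolator:

import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;

public class SurfaceCheckSketch {
    // hypothetical surface lookup standing in for the interpolator
    static double surfaceZAt(double x, double y) {
        return 0.5 * x + 0.25 * y; // a tilted plane, for illustration only
    }

    static boolean isAboveSurface(Vector3D point) {
        // same pattern as above: sample at (getX(), getY()), then compare getZ()
        return point.getZ() > surfaceZAt(point.getX(), point.getY());
    }

    public static void main(String[] args) {
        System.out.println(isAboveSurface(new Vector3D(1.0, 2.0, 3.0))); // prints true
    }
}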

From source file:org.orekit.attitudes.YawSteeringTest.java

@Test
public void testCompensAxis() throws OrekitException {

    //  Attitude laws
    // **************
    // Target pointing attitude provider over satellite nadir at date, without yaw compensation
    NadirPointing nadirLaw = new NadirPointing(circOrbit.getFrame(), earthShape);

    // Target pointing attitude provider with yaw compensation
    YawSteering yawCompensLaw = new YawSteering(circOrbit.getFrame(), nadirLaw, CelestialBodyFactory.getSun(),
            Vector3D.MINUS_I);

    // Get attitude rotations from non yaw compensated / yaw compensated laws
    Rotation rotNoYaw = nadirLaw.getAttitude(circOrbit, date, circOrbit.getFrame()).getRotation();
    Rotation rotYaw = yawCompensLaw.getAttitude(circOrbit, date, circOrbit.getFrame()).getRotation();

    // Compose the two rotations
    Rotation compoRot = rotYaw.applyTo(rotNoYaw.revert());
    Vector3D yawAxis = compoRot.getAxis();

    // Check axis
    Assert.assertEquals(0., yawAxis.getX(), Utils.epsilonTest);
    Assert.assertEquals(0., yawAxis.getY(), Utils.epsilonTest);
    Assert.assertEquals(1., yawAxis.getZ(), Utils.epsilonTest);

}
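
The assertions read the composed rotation's axis through getX()/getY()/getZ(). The same pattern works with plain Commons Math, without Orekit; a minimal sketch with angles chosen only for illustration:

import org.apache.commons.math3.geometry.euclidean.threed.Rotation;
import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;

public class RotationAxisSketch {
    public static void main(String[] args) {
        // two rotations about the same axis, differing only in angle
        Rotation a = new Rotation(Vector3D.PLUS_K, Math.toRadians(30));
        Rotation b = new Rotation(Vector3D.PLUS_K, Math.toRadians(80));
        // net rotation taking a to b; its axis must be (plus or minus) Z
        Rotation compo = b.applyTo(a.revert());
        Vector3D axis = compo.getAxis();
        System.out.printf("axis = (%f, %f, %f)%n", axis.getX(), axis.getY(), axis.getZ());
    }
}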

From source file:org.orekit.bodies.Ellipsoid.java

/** Compute the 2D ellipse at the intersection of the 3D ellipsoid and a plane.
 * @param planePoint point belonging to the plane, in the ellipsoid frame
 * @param planeNormal normal of the plane, in the ellipsoid frame
 * @return plane section or null if there are no intersections
 */
public Ellipse getPlaneSection(final Vector3D planePoint, final Vector3D planeNormal) {

    // we define the points Q in the plane using two free variables τ and ν as:
    // Q = P + τ u + ν v
    // where u and v are two unit vectors belonging to the plane
    // Q belongs to the 3D ellipsoid so:
    // (xQ / a)² + (yQ / b)² + (zQ / c)² = 1
    // combining both equations, we get:
    //   (xP² + 2 xP (τ xU + ν xV) + (τ xU + ν xV)²) / a²
    // + (yP² + 2 yP (τ yU + ν yV) + (τ yU + ν yV)²) / b²
    // + (zP² + 2 zP (τ zU + ν zV) + (τ zU + ν zV)²) / c²
    // = 1
    // which can be rewritten:
    // α τ² + β ν² + 2 γ τν + 2 δ τ + 2 ε ν + ζ = 0
    // with
    // α =  xU²  / a² +  yU²  / b² +  zU²  / c² > 0
    // β =  xV²  / a² +  yV²  / b² +  zV²  / c² > 0
    // γ = xU xV / a² + yU yV / b² + zU zV / c²
    // δ = xP xU / a² + yP yU / b² + zP zU / c²
    // ε = xP xV / a² + yP yV / b² + zP zV / c²
    // ζ =  xP²  / a² +  yP²  / b² +  zP²  / c² - 1
    // this is the equation of a conic (here an ellipse)
    // Of course, we note that if the point P belongs to the ellipsoid
    // then ζ = 0 and the equation holds at point P since τ = 0 and ν = 0
    final Vector3D u = planeNormal.orthogonal();
    final Vector3D v = Vector3D.crossProduct(planeNormal, u).normalize();
    final double xUOa = u.getX() / a;
    final double yUOb = u.getY() / b;
    final double zUOc = u.getZ() / c;
    final double xVOa = v.getX() / a;
    final double yVOb = v.getY() / b;
    final double zVOc = v.getZ() / c;
    final double xPOa = planePoint.getX() / a;
    final double yPOb = planePoint.getY() / b;
    final double zPOc = planePoint.getZ() / c;
    final double alpha = xUOa * xUOa + yUOb * yUOb + zUOc * zUOc;
    final double beta = xVOa * xVOa + yVOb * yVOb + zVOc * zVOc;
    final double gamma = MathArrays.linearCombination(xUOa, xVOa, yUOb, yVOb, zUOc, zVOc);
    final double delta = MathArrays.linearCombination(xPOa, xUOa, yPOb, yUOb, zPOc, zUOc);
    final double epsilon = MathArrays.linearCombination(xPOa, xVOa, yPOb, yVOb, zPOc, zVOc);
    final double zeta = MathArrays.linearCombination(xPOa, xPOa, yPOb, yPOb, zPOc, zPOc, 1, -1);

    // reduce the general equation α τ² + β ν² + 2 γ τν + 2 δ τ + 2 ε ν + ζ = 0
    // to canonical form (λ/l)² + (μ/m)² = 1
    // using a coordinates change
    //       τ = τC + λ cosθ - μ sinθ
    //       ν = νC + λ sinθ + μ cosθ
    // or equivalently
    //       λ =   (τ - τC) cosθ + (ν - νC) sinθ
    //       μ = - (τ - τC) sinθ + (ν - νC) cosθ
    // τC and νC are the coordinates of the 2D ellipse center with respect to P
    // 2l and 2m are the axes lengths (major or minor depending on which one is greatest)
    // θ is the angle of the 2D ellipse axis corresponding to the axis with length 2l

    // choose θ in order to cancel the coupling term in λμ
    // expanding the general equation, we get: A λ² + B μ² + C λμ + D λ + E μ + F = 0
    // with C = 2[(β - α) cosθ sinθ + γ (cosθ² - sinθ²)]
    // hence the term is cancelled when θ = arctan(t), with γ t² + (α - β) t - γ = 0
    // As the solutions of the quadratic equation obey t₁t₂ = -1, they correspond to
    // angles θ in quadrature to each other. Selecting one solution or the other simply
    // exchanges the principal axes. As we don't care about which axis we want as the
    // first one, we select an arbitrary solution
    final double tanTheta;
    if (FastMath.abs(gamma) < Precision.SAFE_MIN) {
        tanTheta = 0.0;
    } else {
        final double bMA = beta - alpha;
        tanTheta = (bMA >= 0) ? (-2 * gamma / (bMA + FastMath.sqrt(bMA * bMA + 4 * gamma * gamma)))
                : (-2 * gamma / (bMA - FastMath.sqrt(bMA * bMA + 4 * gamma * gamma)));
    }
    final double tan2 = tanTheta * tanTheta;
    final double cos2 = 1 / (1 + tan2);
    final double sin2 = tan2 * cos2;
    final double cosSin = tanTheta * cos2;
    final double cos = FastMath.sqrt(cos2);
    final double sin = tanTheta * cos;

    // choose τC and νC in order to cancel the linear terms in λ and μ
    // expanding the general equation, we get: A λ² + B μ² + C λμ + D λ + E μ + F = 0
    // with D = 2[ (α τC + γ νC + δ) cosθ + (γ τC + β νC + ε) sinθ]
    //      E = 2[-(α τC + γ νC + δ) sinθ + (γ τC + β νC + ε) cosθ]
    // θ can be eliminated by combining the equations
    // D cosθ - E sinθ = 2[α τC + γ νC + δ]
    // E cosθ + D sinθ = 2[γ τC + β νC + ε]
    // hence the terms D and E are both cancelled (regardless of θ) when
    //     τC = (β δ - γ ε) / (γ² - α β)
    //     νC = (α ε - γ δ) / (γ² - α β)
    final double denom = MathArrays.linearCombination(gamma, gamma, -alpha, beta);
    final double tauC = MathArrays.linearCombination(beta, delta, -gamma, epsilon) / denom;
    final double nuC = MathArrays.linearCombination(alpha, epsilon, -gamma, delta) / denom;

    // compute l and m
    // expanding the general equation, we get: A λ² + B μ² + C λμ + D λ + E μ + F = 0
    // with A = α cosθ² + β sinθ² + 2 γ cosθ sinθ
    //      B = α sinθ² + β cosθ² - 2 γ cosθ sinθ
    //      F = α τC² + β νC² + 2 γ τC νC + 2 δ τC + 2 ε νC + ζ
    // hence we compute directly l = √(-F/A) and m = √(-F/B)
    final double twogcs = 2 * gamma * cosSin;
    final double bigA = alpha * cos2 + beta * sin2 + twogcs;
    final double bigB = alpha * sin2 + beta * cos2 - twogcs;
    final double bigF = (alpha * tauC + 2 * (gamma * nuC + delta)) * tauC + (beta * nuC + 2 * epsilon) * nuC
            + zeta;
    final double l = FastMath.sqrt(-bigF / bigA);
    final double m = FastMath.sqrt(-bigF / bigB);
    if (Double.isNaN(l + m)) {
        // the plane does not intersect the ellipsoid
        return null;
    }

    if (l > m) {
        return new Ellipse(new Vector3D(1, planePoint, tauC, u, nuC, v), new Vector3D(cos, u, sin, v),
                new Vector3D(-sin, u, cos, v), l, m, frame);
    } else {
        return new Ellipse(new Vector3D(1, planePoint, tauC, u, nuC, v), new Vector3D(sin, u, -cos, v),
                new Vector3D(cos, u, sin, v), m, l, frame);
    }

}
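
A hedged usage sketch for the method above; it assumes Orekit's Ellipsoid(frame, a, b, c) constructor and a frame from FramesFactory, as in Orekit's own tests. Slicing with the z = 0 plane through the origin should recover the equatorial section, centered at the origin:

import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;
import org.orekit.bodies.Ellipse;
import org.orekit.bodies.Ellipsoid;
import org.orekit.frames.FramesFactory;

public class PlaneSectionSketch {
    public static void main(String[] args) {
        Ellipsoid ellipsoid = new Ellipsoid(FramesFactory.getGCRF(), 3.0, 2.0, 1.0);
        // plane through the origin with normal +Z
        Ellipse section = ellipsoid.getPlaneSection(Vector3D.ZERO, Vector3D.PLUS_K);
        Vector3D center = section.getCenter();
        // expect approximately (0, 0, 0)
        System.out.println(center.getX() + " " + center.getY() + " " + center.getZ());
    }
}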

From source file:org.orekit.bodies.EllipsoidTest.java

private double errorOnEllipsoid(Ellipse ps, Ellipsoid ellipsoid) {
    double max = 0;
    for (double theta = 0; theta < 2 * FastMath.PI; theta += 0.1) {
        Vector3D p = ps.pointAt(theta);
        double xOa = p.getX() / ellipsoid.getA();
        double yOb = p.getY() / ellipsoid.getB();
        double zOc = p.getZ() / ellipsoid.getC();
        max = FastMath.max(max,
                FastMath.abs(MathArrays.linearCombination(xOa, xOa, yOb, yOb, zOc, zOc, 1, -1)));
    }
    return max;
}
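
The loop above amounts to evaluating |x²/a² + y²/b² + z²/c² − 1| at sample points. Here is that residual on its own, as a minimal sketch; the sample point and semi-axes are illustrative values only:

import org.apache.commons.math3.geometry.euclidean.threed.Vector3D;
import org.apache.commons.math3.util.FastMath;
import org.apache.commons.math3.util.MathArrays;

public class EllipsoidResidualSketch {
    static double residual(Vector3D p, double a, double b, double c) {
        double xOa = p.getX() / a;
        double yOb = p.getY() / b;
        double zOc = p.getZ() / c;
        // x²/a² + y²/b² + z²/c² - 1, evaluated with compensated summation
        return FastMath.abs(MathArrays.linearCombination(xOa, xOa, yOb, yOb, zOc, zOc, 1, -1));
    }

    public static void main(String[] args) {
        // a point on the surface of the 3-2-1 ellipsoid gives a residual near zero
        System.out.println(residual(new Vector3D(3, 0, 0), 3, 2, 1));
    }
}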