Skip to content

calibrate

Calibration workflows for brazilian_disk and coupon_test experiments.

This module provides:

  • Calibration profile loading/validation
  • Dark/blank scalar correction estimation
  • Robust fitting of per-channel stress-optic coefficients (C)
  • Fitting of incoming Stokes state (S_i_hat)
  • End-to-end calibration execution and report/profile output

calibrate

Calibration workflows for photoelastimetry experiments.

This module fits per-wavelength stress-optic coefficients (C) for an explicit fixed incoming polarisation state (S_i_hat), and optional detector blank correction from a multi-load calibration sequence.

Supported calibration methods:

  • brazilian_disk: diametrically-loaded disk with analytical stress field.
  • coupon_test: uniaxial coupon with nominal stress in a gauge ROI.

Functions

load_calibration_profile(profile_file)

Load and validate a calibration profile.

Parameters:

Name Type Description Default
profile_file str

Path to calibration JSON/JSON5 profile.

required

Returns:

Type Description
dict

Validated calibration profile.

Source code in photoelastimetry/calibrate.py
def load_calibration_profile(profile_file):
    """
    Load and validate a calibration profile.

    Parameters
    ----------
    profile_file : str
        Path to calibration JSON/JSON5 profile.

    Returns
    -------
    dict
        Validated calibration profile.

    Raises
    ------
    ValueError
        If the file is missing, required keys are absent, the method is
        unsupported, or array-valued entries have the wrong shape.
    """
    if not os.path.exists(profile_file):
        raise ValueError(f"Calibration file not found: {profile_file}")

    with open(profile_file, "r") as handle:
        profile = json5.load(handle)

    # Every top-level key must be present before any value-level validation.
    required_keys = (
        "version",
        "method",
        "wavelengths",
        "C",
        "S_i_hat",
        "blank_correction",
        "fit_metrics",
        "provenance",
    )
    missing = sorted(key for key in required_keys if key not in profile)
    if missing:
        raise ValueError(f"Calibration profile missing required keys: {missing}")

    supported_methods = {"brazilian_disk", "coupon_test"}
    if profile["method"] not in supported_methods:
        raise ValueError(
            f"Unsupported calibration method '{profile['method']}'. "
            f"Supported methods: {sorted(supported_methods)}."
        )

    # Normalise wavelengths first: the size of C and the blank-correction
    # arrays are validated against the wavelength count.
    wavelengths = _normalise_wavelengths(profile["wavelengths"])
    C = _as_float_array(profile["C"], expected_length=wavelengths.size, name="C")

    # A 2-vector S_i_hat is padded with S3 = 0 (no circular component).
    S_i_hat = np.asarray(profile["S_i_hat"], dtype=float)
    if S_i_hat.size == 2:
        S_i_hat = np.append(S_i_hat, 0.0)
    if S_i_hat.size != 3:
        raise ValueError(f"S_i_hat must have length 2 or 3, got {S_i_hat.size}.")

    blank = profile["blank_correction"]
    if "offset" not in blank or "scale" not in blank:
        raise ValueError("blank_correction must contain 'offset' and 'scale'.")

    offset = np.asarray(blank["offset"], dtype=float)
    scale = np.asarray(blank["scale"], dtype=float)
    expected_shape = (wavelengths.size, 4)
    if offset.shape != expected_shape or scale.shape != expected_shape:
        raise ValueError(
            "blank_correction offset/scale must have shape "
            f"({wavelengths.size}, 4); got {offset.shape} and {scale.shape}."
        )

    # Return a shallow copy with the array-valued entries normalised to
    # plain lists so the profile stays JSON-serialisable.
    validated = dict(profile)
    validated["wavelengths"] = wavelengths.tolist()
    validated["C"] = C.tolist()
    validated["S_i_hat"] = S_i_hat.tolist()
    return validated

compute_blank_correction(dark_frame, blank_frame, eps=1e-06)

Compute per-channel/per-angle scalar blank correction coefficients.

Parameters:

Name Type Description Default
dark_frame ndarray

Dark reference image stack [H, W, C, 4].

required
blank_frame ndarray

Blank reference image stack [H, W, C, 4].

required
eps float

Minimum denominator for stability.

1e-06

Returns:

Type Description
dict

Dictionary with offset and scale arrays, each shape [C, 4].

Source code in photoelastimetry/calibrate.py
def compute_blank_correction(dark_frame, blank_frame, eps=1e-6):
    """
    Estimate scalar blank-correction coefficients per channel and polariser angle.

    The offset is the spatial median of the dark frame; the scale is the
    reciprocal of the (dark-subtracted) spatial median of the blank frame.

    Parameters
    ----------
    dark_frame : ndarray
        Dark reference image stack [H, W, C, 4].
    blank_frame : ndarray
        Blank reference image stack [H, W, C, 4].
    eps : float
        Smallest acceptable denominator; anything at or below it raises.

    Returns
    -------
    dict
        Dictionary with `offset` and `scale` lists (each shape [C, 4]),
        plus the correction `mode` tag and the `eps` used.

    Raises
    ------
    ValueError
        If the frames mismatch in shape, are not [H, W, C, 4], or the
        blank signal is too close to the dark level.
    """
    dark_arr = np.asarray(dark_frame, dtype=float)
    blank_arr = np.asarray(blank_frame, dtype=float)

    if dark_arr.shape != blank_arr.shape:
        raise ValueError(f"Dark and blank frame shapes must match. Got {dark_arr.shape} and {blank_arr.shape}.")
    if dark_arr.ndim != 4 or dark_arr.shape[-1] != 4:
        raise ValueError(f"Blank correction frames must have shape [H, W, C, 4], got {dark_arr.shape}.")

    # Medians over the spatial axes are robust to hot pixels / outliers.
    spatial_axes = (0, 1)
    offset = np.median(dark_arr, axis=spatial_axes)
    denom = np.median(blank_arr, axis=spatial_axes) - offset

    if np.any(denom <= eps):
        raise ValueError("Blank correction denominator is non-positive for at least one channel/polariser.")

    return {
        "offset": offset.tolist(),
        "scale": (1.0 / denom).tolist(),
        "mode": "dark_blank_scalar",
        "eps": float(eps),
    }

apply_blank_correction(data, blank_correction)

Apply per-channel/per-angle blank correction to an image stack.

Parameters:

Name Type Description Default
data ndarray

Image data [H, W, C, 4].

required
blank_correction dict

Blank correction dictionary containing offset and scale.

required

Returns:

Type Description
ndarray

Corrected image stack with same shape as input.

Source code in photoelastimetry/calibrate.py
def apply_blank_correction(data, blank_correction):
    """
    Apply per-channel/per-angle blank correction to an image stack.

    Each pixel is corrected as (value - offset) * scale, where offset and
    scale are broadcast over the spatial dimensions.

    Parameters
    ----------
    data : ndarray
        Image data [H, W, C, 4].
    blank_correction : dict or None
        Blank correction dictionary containing `offset` and `scale`.
        If None, the data is returned unchanged (as a float array).

    Returns
    -------
    ndarray
        Corrected image stack with same shape as input.

    Raises
    ------
    ValueError
        If the data or correction arrays have invalid shapes, or the
        correction keys are missing.
    """
    stack = np.asarray(data, dtype=float)
    if stack.ndim != 4 or stack.shape[-1] != 4:
        raise ValueError(f"Image data must have shape [H, W, C, 4], got {stack.shape}.")

    # No correction supplied: pass the (float-converted) data through.
    if blank_correction is None:
        return stack

    if "offset" not in blank_correction or "scale" not in blank_correction:
        raise ValueError("blank_correction must contain 'offset' and 'scale'.")

    offset = np.asarray(blank_correction["offset"], dtype=float)
    scale = np.asarray(blank_correction["scale"], dtype=float)

    well_formed = offset.ndim == 2 and scale.ndim == 2 and offset.shape[1] == 4 and scale.shape[1] == 4
    if not well_formed:
        raise ValueError(
            "blank_correction offset/scale must each have shape [n_wavelengths, 4]. "
            f"Got {offset.shape} and {scale.shape}."
        )

    n_channels = stack.shape[2]
    if offset.shape[0] != n_channels or scale.shape[0] != n_channels:
        raise ValueError(
            "blank_correction wavelength count must match data channels. "
            f"Got correction {offset.shape[0]} and data {stack.shape[2]}."
        )

    # (C, 4) broadcasts against (H, W, C, 4) along the trailing axes.
    return (stack - offset) * scale

validate_calibration_config(config)

Validate and normalise calibration configuration.

Parameters:

Name Type Description Default
config dict

Calibration configuration loaded from JSON5.

required

Returns:

Type Description
dict

Normalised configuration with defaults.

Source code in photoelastimetry/calibrate.py
def validate_calibration_config(config):
    """
    Validate and normalise calibration configuration.

    Checks method, wavelengths, thickness, method-specific geometry,
    load steps, and fitting options, then returns a normalised dict
    with defaults filled in.

    Parameters
    ----------
    config : dict
        Calibration configuration loaded from JSON5.

    Returns
    -------
    dict
        Normalised configuration with defaults.

    Raises
    ------
    ValueError
        If any required key is missing or any value fails validation.
    """
    method = config.get("method", "brazilian_disk")
    supported_methods = {"brazilian_disk", "coupon_test"}
    if method not in supported_methods:
        raise ValueError(f"Unsupported method='{method}'. Supported methods are {sorted(supported_methods)}.")

    if "wavelengths" not in config:
        raise ValueError("Calibration config must include 'wavelengths'.")
    wavelengths = _normalise_wavelengths(config["wavelengths"])

    if "thickness" not in config:
        raise ValueError("Calibration config must include 'thickness'.")
    thickness = float(config["thickness"])
    if thickness <= 0:
        raise ValueError("thickness must be positive.")

    geometry = dict(config.get("geometry", {}))
    if method == "brazilian_disk":
        for key in ("radius_mm", "radius_px", "center_px"):
            if key not in geometry:
                raise ValueError(f"geometry must include '{key}' for method='brazilian_disk'.")

        radius_mm = float(geometry["radius_mm"])
        radius_px = float(geometry["radius_px"])
        center_px = np.asarray(geometry["center_px"], dtype=float)
        # Keep a metric radius alongside the mm input for downstream use.
        radius_m = radius_mm * 1e-3

        if radius_mm <= 0:
            raise ValueError("geometry.radius_mm must be positive.")
        if radius_px <= 0:
            raise ValueError("geometry.radius_px must be positive.")
        # radius_m = radius_mm * 1e-3, so its positivity is already implied by
        # the radius_mm check above (redundant check removed).

        pixels_per_meter = radius_px / radius_m
        if pixels_per_meter <= 0:
            raise ValueError("Derived pixels_per_meter must be positive from geometry.radius_mm/radius_px.")
        if center_px.size != 2:
            raise ValueError("geometry.center_px must have two elements [cx, cy].")

        # Fractions controlling which disk pixels participate in the fit:
        # edge_margin keeps points inside the rim; contact_exclusion removes
        # the loading-contact regions.
        edge_margin_fraction = float(geometry.get("edge_margin_fraction", 0.9))
        contact_exclusion_fraction = float(geometry.get("contact_exclusion_fraction", 0.12))
        if not (0 < edge_margin_fraction <= 1):
            raise ValueError("geometry.edge_margin_fraction must be in (0, 1].")
        if not (0 <= contact_exclusion_fraction < 1):
            raise ValueError("geometry.contact_exclusion_fraction must be in [0, 1).")

        geometry_validated = {
            "radius_mm": radius_mm,
            "radius_px": radius_px,
            "radius_m": radius_m,
            "center_px": center_px,
            "pixels_per_meter": pixels_per_meter,
            "edge_margin_fraction": edge_margin_fraction,
            "contact_exclusion_fraction": contact_exclusion_fraction,
        }
    else:
        # method == "coupon_test"
        for key in ("gauge_roi_px", "coupon_width_m"):
            if key not in geometry:
                raise ValueError(f"geometry must include '{key}' for method='coupon_test'.")

        gauge_roi_px = np.asarray(geometry["gauge_roi_px"], dtype=int)
        if gauge_roi_px.size != 4:
            raise ValueError("geometry.gauge_roi_px must be [x0, x1, y0, y1].")
        x0, x1, y0, y1 = [int(v) for v in gauge_roi_px]
        if x0 >= x1 or y0 >= y1:
            raise ValueError("geometry.gauge_roi_px must satisfy x0<x1 and y0<y1.")

        coupon_width_m = float(geometry["coupon_width_m"])
        if coupon_width_m <= 0:
            raise ValueError("geometry.coupon_width_m must be positive.")

        load_axis = str(geometry.get("load_axis", "x")).lower()
        if load_axis not in {"x", "y"}:
            raise ValueError("geometry.load_axis must be 'x' or 'y'.")

        # Ratio of transverse to axial stress inside the gauge region
        # (0 by default: pure uniaxial stress assumption).
        transverse_stress_ratio = float(geometry.get("transverse_stress_ratio", 0.0))
        if not (-1.0 <= transverse_stress_ratio <= 1.0):
            raise ValueError("geometry.transverse_stress_ratio must be in [-1, 1].")

        roi_margin_px = int(geometry.get("roi_margin_px", 0))
        if roi_margin_px < 0:
            raise ValueError("geometry.roi_margin_px must be >= 0.")

        geometry_validated = {
            "gauge_roi_px": np.array([x0, x1, y0, y1], dtype=int),
            "coupon_width_m": coupon_width_m,
            "load_axis": load_axis,
            "transverse_stress_ratio": transverse_stress_ratio,
            "roi_margin_px": roi_margin_px,
        }

    load_steps = list(config.get("load_steps", []))
    if len(load_steps) == 0:
        raise ValueError("Calibration requires at least one load_step.")

    normalised_steps = []
    for step in load_steps:
        if "image_file" not in step or "load" not in step:
            raise ValueError("Each load step must include 'image_file' and 'load'.")
        image_file = step["image_file"]
        if not os.path.exists(image_file):
            raise ValueError(f"Calibration load-step image not found: {image_file}")
        normalised_steps.append({"image_file": image_file, "load": float(step["load"])})

    # Soft checks on experiment design: warn (do not fail) when the load
    # sequence is likely too sparse for a robust fit.
    load_zero_tolerance = float(config.get("load_zero_tolerance", 1e-9))
    n_no_load = sum(abs(step["load"]) <= load_zero_tolerance for step in normalised_steps)
    n_loaded = sum(abs(step["load"]) > load_zero_tolerance for step in normalised_steps)
    if len(normalised_steps) < 4:
        warnings.warn(
            "Calibration usually benefits from at least 4 load_steps including a no-load image; "
            "continuing with the available data may reduce robustness.",
            stacklevel=2,
        )
    if n_no_load < 1:
        warnings.warn(
            "Calibration usually benefits from at least one no-load step (load ≈ 0); "
            "continuing without it may reduce robustness.",
            stacklevel=2,
        )
    if n_loaded < 3:
        warnings.warn(
            "Calibration usually benefits from at least three non-zero load steps; "
            "continuing with fewer loaded states may reduce identifiability and fit stability.",
            stacklevel=2,
        )

    fit_cfg = dict(config.get("fit", {}))
    # Legacy key rejected explicitly so stale configs fail loudly.
    if "initial_S_i_hat" in fit_cfg:
        raise ValueError("fit.initial_S_i_hat is no longer supported; use fit.S_i_hat.")
    if "S_i_hat" not in fit_cfg:
        raise ValueError(
            "Calibration config must include fit.S_i_hat as a 3-component array " "[S1_hat, S2_hat, S3_hat]."
        )
    fit_s_i_hat = np.asarray(fit_cfg["S_i_hat"], dtype=float)
    if fit_s_i_hat.shape != (3,):
        raise ValueError(f"fit.S_i_hat must have shape (3,), got {fit_s_i_hat.shape}.")

    # Optimiser defaults (see fit_calibration_parameters for their use).
    fit_cfg.setdefault("max_points", 6000)
    fit_cfg.setdefault("loss", "soft_l1")
    fit_cfg.setdefault("f_scale", 0.05)
    fit_cfg.setdefault("max_nfev", 300)
    fit_cfg.setdefault("seed", 0)
    fit_cfg.setdefault("prior_weight", 0.0)
    fit_cfg["S_i_hat"] = fit_s_i_hat.tolist()

    output_profile = config.get("output_profile", "calibration_profile.json5")
    output_report = config.get("output_report", "calibration_report.md")
    output_diagnostics = config.get("output_diagnostics", "calibration_diagnostics.npz")

    validated = {
        "method": method,
        "wavelengths": wavelengths,
        "thickness": thickness,
        # NOTE(review): nu defaults to 1.0 here — unusual for a Poisson
        # ratio; confirm the intended default with the model code.
        "nu": float(config.get("nu", 1.0)),
        "geometry": geometry_validated,
        "load_steps": normalised_steps,
        "load_zero_tolerance": load_zero_tolerance,
        "dark_frame_file": config.get("dark_frame_file"),
        "blank_frame_file": config.get("blank_frame_file"),
        "fit": fit_cfg,
        "output_profile": output_profile,
        "output_report": output_report,
        "output_diagnostics": output_diagnostics,
        "provenance": dict(config.get("provenance", {})),
    }

    return validated

interactive_geometry_wizard(config)

Interactively pick geometry from the first load-step image.

  • brazilian_disk: click center, then one edge point on the disk.
  • coupon_test: click top-left then bottom-right gauge ROI corners.
Source code in photoelastimetry/calibrate.py
def interactive_geometry_wizard(config):
    """
    Interactively pick geometry from the first load-step image.

    - brazilian_disk: left-click at least three circumference points; a circle
      is fit to them and overlaid together with the resulting ROI. Right-click
      undoes the last point; Reset clears all points; Done accepts the fit.
    - coupon_test: click top-left then bottom-right gauge ROI corners.

    Parameters
    ----------
    config : dict
        Calibration configuration with at least one load step whose
        'image_file' exists; for brazilian_disk, geometry.radius_mm must be
        present and positive.

    Returns
    -------
    dict
        Deep copy of `config` with the interactively selected geometry
        (center_px/radius_px or gauge_roi_px) merged in.

    Raises
    ------
    ValueError
        On unsupported method, missing/invalid inputs, or a cancelled
        selection.
    """
    method = config.get("method", "brazilian_disk")
    if method not in {"brazilian_disk", "coupon_test"}:
        raise ValueError(f"Unsupported method='{method}' for interactive wizard.")

    steps = list(config.get("load_steps", []))
    if len(steps) == 0:
        raise ValueError("Interactive wizard requires at least one load step.")
    first_image = steps[0].get("image_file")
    if first_image is None:
        raise ValueError("First load step must include 'image_file'.")
    if not os.path.exists(first_image):
        raise ValueError(f"Load-step image not found: {first_image}")

    # Use the same preprocessing path as calibration dataset construction so
    # interactive geometry coordinates match model coordinates exactly.
    data = _load_and_validate_image(first_image, expected_shape=None)
    preview = _build_preview_image(data)

    # Imported lazily so the module can be used headless without a
    # matplotlib backend.
    import matplotlib.pyplot as plt

    # Never mutate the caller's config; all edits go into this copy.
    cfg = deepcopy(config)
    geometry = dict(cfg.get("geometry", {}))

    fig, ax = plt.subplots(figsize=(9, 7))
    fig.subplots_adjust(bottom=0.16)  # leave room for the Done/Reset buttons
    ax.imshow(preview, cmap="gray")
    ax.set_title("Calibration Geometry Wizard")
    ax.set_axis_off()
    if method == "brazilian_disk":
        from matplotlib.patches import Circle
        from matplotlib.widgets import Button

        # The physical radius is required up-front: it converts the fitted
        # pixel radius into a pixels-per-meter scale for the ROI preview.
        radius_mm = float(geometry.get("radius_mm", 0.0))
        if radius_mm <= 0:
            raise ValueError(
                "geometry.radius_mm is required and must be positive for interactive disk calibration."
            )
        instruction = ax.text(
            0.01,
            0.99,
            "Left-click circumference points (>=3). Right-click to undo.\nClick Done when the overlay circle matches.",
            transform=ax.transAxes,
            va="top",
            ha="left",
            fontsize=9,
            color="white",
            bbox={"facecolor": "black", "alpha": 0.45, "pad": 4},
        )
        _ = instruction  # artist is only displayed, never updated

        # Mutable state shared by the event callbacks below.
        points = []  # clicked circumference points as (x, y) tuples
        point_scatter = ax.scatter([], [], c="yellow", s=24)
        circle_patch = Circle((0, 0), radius=1.0, fill=False, edgecolor="cyan", linewidth=2, visible=False)
        ax.add_patch(circle_patch)
        roi_contour = None  # current ROI contour artist (or None)
        fit_text = ax.text(
            0.01,
            0.05,
            "",
            transform=ax.transAxes,
            va="bottom",
            ha="left",
            fontsize=9,
            color="white",
            bbox={"facecolor": "black", "alpha": 0.45, "pad": 3},
        )

        # accepted: Done was pressed with a valid fit.
        # fit: (cx, cy, radius_px, roi_pixels) for the latest valid circle.
        state = {"accepted": False, "fit": None}

        def _update_overlay():
            # Refit the circle from the current click list and redraw the
            # scatter, circle, ROI contour, and status text.
            nonlocal roi_contour
            if len(points) == 0:
                point_scatter.set_offsets(np.empty((0, 2)))
            else:
                point_scatter.set_offsets(np.asarray(points, dtype=float))
            if len(points) >= 3:
                try:
                    cx, cy, radius_px = _fit_circle_from_points(points)
                    circle_patch.center = (cx, cy)
                    circle_patch.radius = radius_px
                    circle_patch.set_visible(True)
                    radius_m = radius_mm * 1e-3
                    ppm = radius_px / radius_m
                    # Candidate geometry mirrors validate_calibration_config's
                    # brazilian_disk output so ROI helpers behave identically.
                    geom_candidate = {
                        "radius_mm": radius_mm,
                        "radius_px": radius_px,
                        "radius_m": radius_m,
                        "center_px": np.array([cx, cy], dtype=float),
                        "pixels_per_meter": ppm,
                        "edge_margin_fraction": float(geometry.get("edge_margin_fraction", 0.9)),
                        "contact_exclusion_fraction": float(geometry.get("contact_exclusion_fraction", 0.12)),
                    }
                    roi_pixels = _disk_roi_pixel_count(preview.shape[0], preview.shape[1], geom_candidate)
                    fit_text.set_text(
                        f"n={len(points)}  center=({cx:.1f}, {cy:.1f})  radius={radius_px:.1f}px ({radius_mm:.3f} mm)  roi_pixels={roi_pixels}"
                    )
                    state["fit"] = (cx, cy, radius_px, roi_pixels)

                    # Drop the stale ROI contour before drawing the new one.
                    # NOTE(review): ContourSet.collections is deprecated in
                    # newer Matplotlib — verify the supported version range.
                    if roi_contour is not None:
                        for coll in roi_contour.collections:
                            coll.remove()
                    _, _, _, roi_mask = _build_disk_coordinates(
                        preview.shape[0], preview.shape[1], geom_candidate
                    )
                    roi_contour = ax.contour(
                        roi_mask.astype(float), levels=[0.5], colors="lime", linewidths=1.0
                    )
                except ValueError as exc:
                    # Degenerate point set (e.g. collinear clicks): show the
                    # fit error and invalidate the current candidate.
                    circle_patch.set_visible(False)
                    fit_text.set_text(str(exc))
                    state["fit"] = None
                    if roi_contour is not None:
                        for coll in roi_contour.collections:
                            coll.remove()
                        roi_contour = None
            else:
                # Not enough points for a circle fit yet.
                circle_patch.set_visible(False)
                fit_text.set_text("Need at least 3 points.")
                state["fit"] = None
                if roi_contour is not None:
                    for coll in roi_contour.collections:
                        coll.remove()
                    roi_contour = None
            fig.canvas.draw_idle()

        def _on_click(event):
            # Left-click adds a circumference point; right-click removes the
            # most recent one. Clicks outside the axes are ignored.
            if event.inaxes != ax or event.xdata is None or event.ydata is None:
                return
            if event.button == 1:
                points.append((float(event.xdata), float(event.ydata)))
            elif event.button == 3 and len(points) > 0:
                points.pop()
            else:
                return
            _update_overlay()

        def _on_done(_event):
            # Accept only when a valid circle with a non-empty ROI exists.
            if state["fit"] is None:
                fit_text.set_text("Need a valid circle fit before Done.")
                fig.canvas.draw_idle()
                return
            if state["fit"][3] <= 0:
                fit_text.set_text("ROI is empty for this circle. Add/adjust circumference points.")
                fig.canvas.draw_idle()
                return
            state["accepted"] = True
            plt.close(fig)

        def _on_reset(_event):
            # Clear all clicked points and redraw an empty overlay.
            points.clear()
            _update_overlay()

        done_ax = fig.add_axes([0.80, 0.03, 0.16, 0.07])
        done_btn = Button(done_ax, "Done")
        done_btn.on_clicked(_on_done)

        reset_ax = fig.add_axes([0.62, 0.03, 0.16, 0.07])
        reset_btn = Button(reset_ax, "Reset")
        reset_btn.on_clicked(_on_reset)

        fig.canvas.mpl_connect("button_press_event", _on_click)
        _update_overlay()
        plt.show()

        # Closing the window without pressing Done counts as cancellation.
        if not state["accepted"] or state["fit"] is None:
            raise ValueError("Interactive selection canceled. Circle was not accepted.")

        cx, cy, radius_px, _roi_pixels = state["fit"]
        geometry["center_px"] = [cx, cy]
        geometry["radius_px"] = radius_px
    else:
        # coupon_test: two clicks define opposite corners of the gauge ROI.
        points = plt.ginput(2, timeout=0)
        plt.close(fig)
        if len(points) != 2:
            raise ValueError("Interactive selection canceled. Please provide exactly two clicks.")
        (x0, y0), (x1, y1) = points
        # Sort so the ROI is valid regardless of which corner was clicked first.
        xa, xb = sorted([int(round(x0)), int(round(x1))])
        ya, yb = sorted([int(round(y0)), int(round(y1))])
        if xa == xb or ya == yb:
            raise ValueError("Selected ROI must have non-zero width and height.")
        geometry["gauge_roi_px"] = [xa, xb, ya, yb]

    cfg["geometry"] = geometry
    return cfg

calibration_residuals(params, dataset, fixed_s_i_hat)

Compute calibration residuals for least-squares fitting.

Parameters:

Name Type Description Default
params ndarray

1D parameter vector containing one C value per wavelength channel.

required
dataset dict

Dataset dictionary from _build_dataset.

required
fixed_s_i_hat array-like

Fixed source state used for the forward model.

required

Returns:

Type Description
ndarray

1D residual vector.

Source code in photoelastimetry/calibrate.py
def calibration_residuals(params, dataset, fixed_s_i_hat):
    """
    Compute calibration residuals for least-squares fitting.

    For every load step and wavelength channel, the forward model predicts
    the Stokes components at the sampled pixels; the residual is the
    flattened difference against the measured values.

    Parameters
    ----------
    params : ndarray
        1D parameter vector containing one C value per wavelength channel.
    dataset : dict
        Dataset dictionary from `_build_dataset`.
    fixed_s_i_hat : array-like
        Fixed source state used for the forward model.

    Returns
    -------
    ndarray
        1D residual vector.
    """
    wavelengths = dataset["wavelengths"]
    n_channels = wavelengths.size
    C = np.asarray(params[:n_channels], dtype=float)
    s_i_hat = _normalise_s_i_hat(fixed_s_i_hat)

    nu = dataset["nu"]
    thickness = dataset["thickness"]

    chunks = []
    step_iter = zip(
        dataset["measured_steps"],
        dataset["sigma_xx_steps"],
        dataset["sigma_yy_steps"],
        dataset["sigma_xy_steps"],
    )
    for measured, sigma_xx, sigma_yy, sigma_xy in step_iter:
        for channel in range(n_channels):
            predicted = predict_stokes(
                sigma_xx,
                sigma_yy,
                sigma_xy,
                C[channel],
                nu,
                thickness,
                wavelengths[channel],
                s_i_hat,
            )
            chunks.append((predicted - measured[:, channel, :]).ravel())

    # Guard against an empty dataset so least_squares still gets a vector.
    if not chunks:
        return np.zeros(1, dtype=float)

    return np.concatenate(chunks, axis=0)

fit_calibration_parameters(dataset, fit_config)

Fit C from calibration dataset with fixed S_i_hat.

Parameters:

Name Type Description Default
dataset dict

Dataset dictionary from _build_dataset.

required
fit_config dict

Fitting options.

required

Returns:

Type Description
dict

Fit results containing estimated C, fixed S_i_hat, metrics, and optimizer state.

Source code in photoelastimetry/calibrate.py
def fit_calibration_parameters(dataset, fit_config):
    """
    Fit C from calibration dataset with fixed S_i_hat.

    Runs a bounded robust least-squares fit of one stress-optic coefficient
    per wavelength channel, with an optional quadratic prior pulling the
    solution towards the initial guess.

    Parameters
    ----------
    dataset : dict
        Dataset dictionary from `_build_dataset`.
    fit_config : dict
        Fitting options.

    Returns
    -------
    dict
        Fit results containing estimated C, fixed S_i_hat, metrics, and optimizer state.

    Raises
    ------
    ValueError
        If fit.c_relative_bounds is malformed.
    """
    n_channels = dataset["wavelengths"].size

    init_c = _as_float_array(
        fit_config.get("initial_C", np.full(n_channels, 3e-9)), expected_length=n_channels, name="initial_C"
    )
    fixed_s_i_hat = _resolve_fixed_s_i_hat(fit_config)

    # Bounds on C: either relative factors around the initial guess, or a
    # wide default range.
    bounds_spec = fit_config.get("c_relative_bounds")
    if bounds_spec is None:
        c_lower = np.full(n_channels, 1e-15)
        c_upper = np.full(n_channels, 1e-4)
    else:
        if len(bounds_spec) != 2:
            raise ValueError("fit.c_relative_bounds must be [lower_factor, upper_factor].")
        lower_factor = float(bounds_spec[0])
        upper_factor = float(bounds_spec[1])
        if lower_factor <= 0 or upper_factor <= 0 or lower_factor >= upper_factor:
            raise ValueError("fit.c_relative_bounds factors must satisfy 0 < lower < upper.")
        # Floors keep the bounds strictly positive and strictly ordered.
        c_lower = np.maximum(init_c * lower_factor, 1e-15)
        c_upper = np.maximum(init_c * upper_factor, c_lower + 1e-15)

    x0 = init_c.copy()
    prior_weight = float(fit_config.get("prior_weight", 0.0))

    def _objective(params):
        # Model residuals, optionally augmented with a prior pulling the
        # parameters towards the initial guess.
        residual = calibration_residuals(params, dataset, fixed_s_i_hat=fixed_s_i_hat)
        if prior_weight <= 0:
            return residual
        return np.concatenate([residual, prior_weight * (params - x0)], axis=0)

    result = least_squares(
        _objective,
        x0,
        bounds=(c_lower, c_upper),
        loss=fit_config["loss"],
        f_scale=float(fit_config["f_scale"]),
        max_nfev=int(fit_config["max_nfev"]),
    )

    residual = result.fun
    fit_metrics = {
        "success": bool(result.success),
        "status": int(result.status),
        "message": str(result.message),
        "cost": float(result.cost),
        "rmse": float(np.sqrt(np.mean(residual**2))),
        "mae": float(np.mean(np.abs(residual))),
        "n_residuals": int(residual.size),
        "n_samples": int(dataset["sample_y"].size),
        "n_load_steps": int(dataset["loads"].size),
    }

    return {
        "C": np.asarray(result.x[:n_channels], dtype=float),
        "S_i_hat": fixed_s_i_hat,
        "fit_metrics": fit_metrics,
        "optimizer_result": result,
    }

run_calibration(config)

Run full calibration workflow from config.

Parameters:

Name Type Description Default
config dict

Calibration input configuration.

required

Returns:

Type Description
dict

Result dictionary containing the calibration profile and metadata.

Source code in photoelastimetry/calibrate.py
def run_calibration(config):
    """
    Run full calibration workflow from config.

    Validates the config, builds the calibration dataset, fits the
    stress-optic coefficients, and writes the profile (JSON), a report,
    a diagnostics archive (NPZ), and a diagnostics plot.

    Parameters
    ----------
    config : dict
        Calibration input configuration.

    Returns
    -------
    dict
        Result dictionary containing the calibration profile and metadata.
    """
    cfg = validate_calibration_config(config)
    dataset = _build_dataset(cfg)
    fit_result = fit_calibration_parameters(dataset, cfg["fit"])

    output_profile = cfg["output_profile"]
    output_report = cfg["output_report"]
    output_diagnostics = cfg["output_diagnostics"]
    # Derive the plot path from the report path (same stem, "_fit.png").
    report_root, report_ext = os.path.splitext(output_report)
    output_diagnostics_plot = f"{report_root}_fit.png" if report_ext else f"{output_report}_fit.png"

    # Ensure every output directory exists before writing anything.
    output_dir = os.path.dirname(output_profile)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)

    for path in (output_report, output_diagnostics, output_diagnostics_plot):
        folder = os.path.dirname(path)
        if folder:
            os.makedirs(folder, exist_ok=True)

    # Provenance file paths are recorded relative to the profile's directory
    # so the profile stays portable alongside its inputs.
    profile_dir = os.path.dirname(output_profile) if os.path.dirname(output_profile) else os.getcwd()

    provenance_steps = [
        {
            "load": float(step["load"]),
            "image_file": _safe_relative_path(step["image_file"], profile_dir),
        }
        for step in cfg["load_steps"]
    ]

    provenance = {
        "generated_utc": datetime.now(timezone.utc).isoformat(),
        "dark_frame_file": _safe_relative_path(cfg["dark_frame_file"], profile_dir),
        "blank_frame_file": _safe_relative_path(cfg["blank_frame_file"], profile_dir),
        "load_steps": provenance_steps,
        "diagnostics_file": _safe_relative_path(output_diagnostics, profile_dir),
        "diagnostics_plot_file": _safe_relative_path(output_diagnostics_plot, profile_dir),
    }
    # User-supplied provenance entries override the generated ones.
    provenance.update(cfg["provenance"])

    # Assemble the profile in the schema expected by load_calibration_profile.
    profile = {
        "version": 1,
        "method": cfg["method"],
        "wavelengths": cfg["wavelengths"].tolist(),
        "C": fit_result["C"].tolist(),
        "S_i_hat": fit_result["S_i_hat"].tolist(),
        "blank_correction": dataset["blank_correction"],
        "fit_metrics": fit_result["fit_metrics"],
        "provenance": provenance,
    }

    # Written with plain json (valid JSON is also valid JSON5).
    with open(output_profile, "w") as f:
        json.dump(profile, f, indent=2)

    _write_visual_diagnostics_plot(output_diagnostics_plot, dataset, fit_result)
    _write_report(output_report, profile, cfg)
    visual_maps = _build_visual_diagnostics(dataset, fit_result)

    # Persist masks, samples, fitted parameters, residuals, and the visual
    # diagnostic maps for offline inspection.
    np.savez(
        output_diagnostics,
        roi_mask=dataset["roi_mask"],
        model_mask=dataset["model_mask"],
        disk_mask=dataset["disk_mask"],
        sample_y=dataset["sample_y"],
        sample_x=dataset["sample_x"],
        loads=dataset["loads"],
        C=np.asarray(profile["C"], dtype=float),
        S_i_hat=np.asarray(profile["S_i_hat"], dtype=float),
        residual=fit_result["optimizer_result"].fun,
        diagnostic_channel=np.array([visual_maps["channel"]], dtype=int),
        measured_i0=visual_maps["measured_i0"],
        predicted_i0=visual_maps["predicted_i0"],
        i0_residual_abs=visual_maps["i0_residual_abs"],
        measured_s1=visual_maps["measured_s1"],
        predicted_s1=visual_maps["predicted_s1"],
        stokes_residual_mag=visual_maps["stokes_residual_mag"],
    )

    return {
        "profile": profile,
        "profile_file": output_profile,
        "report_file": output_report,
        "diagnostics_file": output_diagnostics,
        "diagnostics_plot_file": output_diagnostics_plot,
    }