Skip to content

Benchmark Metrics

Metrics

openlithohub.benchmark.metrics.epe

Edge Placement Error (EPE) computation.

compute_epe(predicted, target, pixel_size_nm=1.0)

Compute Edge Placement Error between predicted and target contours.

Extracts edges from both binary masks via Sobel operators, then computes the minimum Euclidean distance from each predicted edge pixel to the nearest target edge pixel.

Parameters:

Name Type Description Default
predicted Tensor

Binary mask of predicted pattern (H, W), values in {0, 1}.

required
target Tensor

Binary mask of target/reference pattern (H, W), values in {0, 1}.

required
pixel_size_nm float

Physical size of each pixel in nanometers.

1.0

Returns:

Type Description
dict[str, float]

Dictionary with 'epe_mean_nm', 'epe_max_nm', 'epe_std_nm'.

Source code in src/openlithohub/benchmark/metrics/epe.py
def compute_epe(
    predicted: torch.Tensor,
    target: torch.Tensor,
    pixel_size_nm: float = 1.0,
) -> dict[str, float]:
    """Compute Edge Placement Error between predicted and target contours.

    Edges are extracted from both binary masks (Sobel-based `_extract_edges`);
    each predicted edge pixel is matched to its nearest target edge pixel by
    Euclidean distance, and the distances are summarized in nanometers.

    Args:
        predicted: Binary mask of predicted pattern (H, W), values in {0, 1}.
        target: Binary mask of target/reference pattern (H, W), values in {0, 1}.
        pixel_size_nm: Physical size of each pixel in nanometers.

    Returns:
        Dictionary with 'epe_mean_nm', 'epe_max_nm', 'epe_std_nm'.

    Raises:
        ValueError: If the two masks have different shapes.
    """
    if predicted.shape != target.shape:
        raise ValueError(f"Shape mismatch: predicted {predicted.shape} vs target {target.shape}")

    pred_points = _extract_edges(predicted).nonzero(as_tuple=False).float()
    tgt_points = _extract_edges(target).nonzero(as_tuple=False).float()

    # With no edges on either side there is nothing to measure.
    if pred_points.numel() == 0 or tgt_points.numel() == 0:
        return {"epe_mean_nm": 0.0, "epe_max_nm": 0.0, "epe_std_nm": 0.0}

    # Chunk the predicted points so the pairwise distance matrix stays small.
    step = 4096
    chunk_minima = [
        torch.cdist(pred_points[start : start + step], tgt_points).min(dim=1).values
        for start in range(0, pred_points.shape[0], step)
    ]
    nearest_nm = torch.cat(chunk_minima) * pixel_size_nm

    # std() is undefined (NaN) for a single sample; report 0.0 instead.
    std_nm = float(nearest_nm.std().item()) if nearest_nm.numel() > 1 else 0.0
    return {
        "epe_mean_nm": float(nearest_nm.mean().item()),
        "epe_max_nm": float(nearest_nm.max().item()),
        "epe_std_nm": std_nm,
    }

openlithohub.benchmark.metrics.pvband

Process Variation Band (PV Band) computation.

compute_pvband(mask, nominal_dose=1.0, dose_variation=0.05, defocus_range_nm=20.0, pixel_size_nm=1.0)

Compute Process Variation Band width for a given mask.

PV Band measures the area between resist contours at process window extremes. Uses a simplified Gaussian forward model to simulate aerial images at different dose/focus corners, then computes the band between the outer (union) and inner (intersection) resist envelopes.

Source code in src/openlithohub/benchmark/metrics/pvband.py
def compute_pvband(
    mask: torch.Tensor,
    nominal_dose: float = 1.0,
    dose_variation: float = 0.05,
    defocus_range_nm: float = 20.0,
    pixel_size_nm: float = 1.0,
) -> dict[str, float]:
    """Compute Process Variation Band width for a given mask.

    PV Band measures the area between resist contours at process window
    extremes. A simplified Gaussian forward model produces aerial images at
    the four dose/defocus corners; the band is the region between the outer
    (union) and inner (intersection) resist envelopes.

    Args:
        mask: Mask tensor; reduced to 2D via ensure_2d and binarized at 0.5.
        nominal_dose: Center dose of the process window.
        dose_variation: Fractional dose excursion (+/-) around nominal.
        defocus_range_nm: Defocus excursion, mapped to a blur-sigma change.
        pixel_size_nm: Physical pixel size in nanometers.

    Returns:
        Dictionary with 'pvband_mean_nm' and 'pvband_max_nm'.
    """
    binary = (ensure_2d(mask) > 0.5).float()

    base_sigma = 2.0
    defocus_sigma = defocus_range_nm / (2.0 * pixel_size_nm)

    # Process-window corners: every combination of dose and blur extremes.
    dose_corners = (nominal_dose * (1.0 + dose_variation), nominal_dose * (1.0 - dose_variation))
    sigma_corners = (base_sigma + defocus_sigma, max(0.5, base_sigma - defocus_sigma * 0.5))

    outer = torch.zeros_like(binary)  # union of resist contours
    inner = torch.ones_like(binary)  # intersection of resist contours

    for dose in dose_corners:
        for sigma in sigma_corners:
            aerial = simulate_aerial_image(binary, sigma_px=sigma, dose=dose)
            resist = apply_resist_threshold(aerial, threshold=0.5)
            outer = torch.maximum(outer, resist)
            inner = torch.minimum(inner, resist)

    band = (outer - inner).clamp(min=0.0)
    if band.sum().item() < 1.0:
        return {"pvband_mean_nm": 0.0, "pvband_max_nm": 0.0}

    band_binary = (band > 0.5).float()
    in_band = band_binary > 0.5
    if not in_band.any():
        return {"pvband_mean_nm": 0.0, "pvband_max_nm": 0.0}

    # Distance transform inside the band approximates the local band width.
    widths_nm = distance_transform(band_binary)[in_band] * pixel_size_nm
    return {
        "pvband_mean_nm": float(widths_nm.mean().item()),
        "pvband_max_nm": float(widths_nm.max().item()),
    }

openlithohub.benchmark.metrics.shot_count

Shot count estimation for mask manufacturing cost.

estimate_shot_count(mask, writer_type='mbmw', min_shot_size_nm=5.0, pixel_size_nm=1.0)

Estimate the number of shots needed to write a mask.

Shot count is a direct proxy for mask writing time and manufacturing cost.

For multi-beam mask writers (MBMW), each foreground pixel corresponds to one beam exposure position. Shot count equals the number of foreground pixels scaled by the ratio of pixel area to beam grid area.

For variable shaped beam (VSB) writers, shots are rectangular exposures. The estimate uses the mask complexity (perimeter/area ratio) to approximate the number of rectangles needed.

Parameters:

Name Type Description Default
mask Tensor

Binary mask tensor (H, W).

required
writer_type str

'vsb' (variable shaped beam) or 'mbmw' (multi-beam).

'mbmw'
min_shot_size_nm float

Minimum addressable shot dimension.

5.0
pixel_size_nm float

Physical pixel size in nanometers.

1.0

Returns:

Type Description
dict[str, int | float]

Dictionary with 'shot_count' and 'estimated_write_time_s'.

Raises:

Type Description
ValueError

If writer_type is not 'mbmw' or 'vsb'.

Source code in src/openlithohub/benchmark/metrics/shot_count.py
def estimate_shot_count(
    mask: torch.Tensor,
    writer_type: str = "mbmw",
    min_shot_size_nm: float = 5.0,
    pixel_size_nm: float = 1.0,
) -> dict[str, int | float]:
    """Estimate the number of shots needed to write a mask.

    Shot count is a direct proxy for mask writing time and manufacturing cost.

    For multi-beam mask writers (MBMW), each foreground pixel corresponds to
    one beam exposure position. Shot count equals the number of foreground pixels
    scaled by the ratio of pixel area to beam grid area.

    For variable shaped beam (VSB) writers, shots are rectangular exposures.
    The estimate uses the mask complexity (perimeter/area ratio) to approximate
    the number of rectangles needed.

    Args:
        mask: Binary mask tensor (H, W).
        writer_type: 'vsb' (variable shaped beam) or 'mbmw' (multi-beam).
        min_shot_size_nm: Minimum addressable shot dimension.
        pixel_size_nm: Physical pixel size in nanometers.

    Returns:
        Dictionary with 'shot_count' and 'estimated_write_time_s'.

    Raises:
        ValueError: If writer_type is not 'mbmw' or 'vsb'.
    """
    if writer_type not in ("mbmw", "vsb"):
        raise ValueError(f"writer_type must be 'mbmw' or 'vsb', got '{writer_type}'")

    m = ensure_2d(mask)
    binary = (m > 0.5).float()

    foreground_pixels = int(binary.sum().item())

    if foreground_pixels == 0:
        return {"shot_count": 0, "estimated_write_time_s": 0.0}

    if writer_type == "mbmw":
        return _estimate_mbmw(binary, foreground_pixels, min_shot_size_nm, pixel_size_nm)
    return _estimate_vsb(binary, foreground_pixels, min_shot_size_nm, pixel_size_nm)

openlithohub.benchmark.metrics.stochastic

EUV stochastic robustness evaluation.

compute_stochastic_robustness(mask, num_trials=100, dose_photons_per_nm2=30.0, pixel_size_nm=1.0, seed=None)

Evaluate mask robustness against EUV photon shot noise.

Simulates stochastic resist exposure via Poisson photon noise to quantify probability of micro-bridging and line breaks.

Source code in src/openlithohub/benchmark/metrics/stochastic.py
def compute_stochastic_robustness(
    mask: torch.Tensor,
    num_trials: int = 100,
    dose_photons_per_nm2: float = 30.0,
    pixel_size_nm: float = 1.0,
    seed: int | None = None,
) -> dict[str, float]:
    """Evaluate mask robustness against EUV photon shot noise.

    Simulates stochastic resist exposure via Poisson photon noise to quantify
    probability of micro-bridging and line breaks.

    Args:
        mask: Mask tensor; reduced to 2D via ensure_2d and binarized at 0.5.
        num_trials: Number of Monte Carlo noise realizations.
        dose_photons_per_nm2: Nominal photon dose density.
        pixel_size_nm: Physical pixel size in nanometers.
        seed: Optional base seed; trial t reseeds the generator with seed + t
            so results are reproducible per trial.

    Returns:
        Dictionary with 'bridge_probability', 'break_probability',
        'ler_mean_nm', and 'robustness_score'.
    """
    m = ensure_2d(mask)
    binary = (m > 0.5).float()

    sigma_px = 2.0
    aerial_nominal = simulate_aerial_image(binary, sigma_px=sigma_px, dose=1.0)
    resist_nominal = apply_resist_threshold(aerial_nominal, threshold=0.5)

    # Baseline topology: fewer foreground components after noise indicates
    # bridging; fewer background components indicates a line break.
    nominal_fg_components = _count_connected_components(resist_nominal)
    nominal_bg_components = _count_connected_components((resist_nominal < 0.5).float())

    # Expected photon count per pixel for the Poisson draw.
    pixel_area_nm2 = pixel_size_nm * pixel_size_nm
    lambda_map = aerial_nominal.clamp(min=0.0) * dose_photons_per_nm2 * pixel_area_nm2

    generator = torch.Generator(device=mask.device)
    if seed is not None:
        generator.manual_seed(seed)

    bridge_count = 0
    break_count = 0
    ler_values: list[float] = []

    # Band of pixels within ~1.5 px of the nominal resist boundary, used to
    # sample edge displacement for the LER estimate.
    nominal_edge_dist = distance_transform(resist_nominal)
    nominal_edges = (nominal_edge_dist > 0) & (nominal_edge_dist <= 1.5)
    has_edges = bool(nominal_edges.any())

    # NOTE: a previous revision wrapped this loop in a while/batch structure
    # that never actually batched any tensor work; a flat loop with the same
    # per-trial seeding (seed + trial index) is behaviorally identical.
    for trial in range(num_trials):
        if seed is not None:
            generator.manual_seed(seed + trial)

        photons = torch.poisson(lambda_map, generator=generator)
        # Normalize back to intensity units; guard against zero dose.
        noisy_intensity = photons / max(dose_photons_per_nm2 * pixel_area_nm2, 1e-12)
        noisy_resist = apply_resist_threshold(noisy_intensity, threshold=0.5)

        if _count_connected_components(noisy_resist) < nominal_fg_components:
            bridge_count += 1
        if _count_connected_components((noisy_resist < 0.5).float()) < nominal_bg_components:
            break_count += 1

        if has_edges:
            diff = (noisy_resist - resist_nominal).abs()
            ler_values.append(diff[nominal_edges].mean().item() * pixel_size_nm)

    bridge_probability = bridge_count / max(num_trials, 1)
    break_probability = break_count / max(num_trials, 1)
    ler_mean_nm = sum(ler_values) / len(ler_values) if ler_values else 0.0
    robustness_score = max(0.0, 1.0 - (bridge_probability + break_probability) / 2.0)

    return {
        "bridge_probability": bridge_probability,
        "break_probability": break_probability,
        "ler_mean_nm": ler_mean_nm,
        "robustness_score": robustness_score,
    }

Compliance

openlithohub.benchmark.compliance.mrc

Mask Rule Check (MRC) — minimum width/spacing for mask manufacturing.

MRCResult dataclass

Result of a Mask Rule Check.

Source code in src/openlithohub/benchmark/compliance/mrc.py
@dataclass
class MRCResult:
    """Result of a Mask Rule Check.

    Produced by check_mrc; a mask passes only when zero violating pixels
    were found by both the width and spacing checks.
    """

    # True when no width or spacing violation pixels were found.
    passed: bool
    # Total number of violating pixels (width violations + spacing violations).
    violation_count: int
    # violation_count divided by the total pixel count of the mask.
    violation_rate: float
    # Per-violation detail records as built by _add_violations
    # (keys/values are check-specific; see check_mrc).
    violations: list[dict[str, float]]

check_mrc(mask, min_width_nm=40.0, min_spacing_nm=40.0, pixel_size_nm=1.0)

Check mask against minimum width and spacing rules.

MRC violations are a hard-fail metric — a mask that violates these rules cannot be manufactured regardless of optical performance.

Width check: perform morphological opening (erosion then dilation) with radius = floor(min_width / (2 * pixel_size)). Features that survive opening are wide enough. Foreground pixels that disappear after opening are width violation pixels.

Spacing check: same logic on the inverted mask — gaps between features that disappear under opening are too narrow.

Parameters:

Name Type Description Default
mask Tensor

Binary mask tensor (H, W) or (B, C, H, W).

required
min_width_nm float

Minimum allowed feature width.

40.0
min_spacing_nm float

Minimum allowed spacing between features.

40.0
pixel_size_nm float

Physical pixel size for unit conversion.

1.0

Returns:

Type Description
MRCResult

MRCResult with pass/fail status and violation details.

Source code in src/openlithohub/benchmark/compliance/mrc.py
def check_mrc(
    mask: torch.Tensor,
    min_width_nm: float = 40.0,
    min_spacing_nm: float = 40.0,
    pixel_size_nm: float = 1.0,
) -> MRCResult:
    """Check mask against minimum width and spacing rules.

    MRC violations are a hard-fail metric — a mask that violates these rules
    cannot be manufactured regardless of optical performance.

    Width check: morphological opening (erosion then dilation) with
    radius = floor(min_width / (2 * pixel_size)). Foreground pixels that do
    not survive the opening belong to features that are too narrow.

    Spacing check: the same opening applied to the inverted mask — background
    pixels that vanish under opening sit in gaps that are too narrow.

    Args:
        mask: Binary mask tensor (H, W) or (B, C, H, W).
        min_width_nm: Minimum allowed feature width.
        min_spacing_nm: Minimum allowed spacing between features.
        pixel_size_nm: Physical pixel size for unit conversion.

    Returns:
        MRCResult with pass/fail status and violation details.
    """
    surface = ensure_2d(mask)
    binary = (surface > 0.5).float()

    height, width = binary.shape
    total_pixels = height * width
    any_foreground = binary.sum() > 0
    any_background = (1.0 - binary).sum() > 0

    violations: list[dict[str, float]] = []

    def _opened(img: torch.Tensor, radius: int) -> torch.Tensor:
        # Morphological opening: erosion followed by dilation.
        return binary_dilation(binary_erosion(img, radius=radius), radius=radius)

    width_radius = int(math.floor(min_width_nm / (2.0 * pixel_size_nm)))
    spacing_radius = int(math.floor(min_spacing_nm / (2.0 * pixel_size_nm)))

    n_width_bad = 0
    n_spacing_bad = 0

    # Width rule: only meaningful with a nonzero structuring radius.
    if any_foreground and width_radius >= 1:
        bad_fg = (binary > 0.5) & (_opened(binary, width_radius) < 0.5)
        n_width_bad = int(bad_fg.sum().item())
        if n_width_bad > 0:
            ys, xs = torch.where(bad_fg)
            _add_violations(
                violations, "width", ys, xs, distance_transform(binary), pixel_size_nm, min_width_nm
            )

    # Spacing rule: identical logic on the inverted mask.
    if any_foreground and any_background and spacing_radius >= 1:
        inverse = (binary < 0.5).float()
        bad_bg = (inverse > 0.5) & (_opened(inverse, spacing_radius) < 0.5)
        n_spacing_bad = int(bad_bg.sum().item())
        if n_spacing_bad > 0:
            ys, xs = torch.where(bad_bg)
            _add_violations(
                violations, "spacing", ys, xs, distance_transform(inverse), pixel_size_nm, min_spacing_nm
            )

    total_bad = n_width_bad + n_spacing_bad
    return MRCResult(
        passed=total_bad == 0,
        violation_count=total_bad,
        violation_rate=total_bad / total_pixels if total_pixels > 0 else 0.0,
        violations=violations,
    )

openlithohub.benchmark.compliance.drc

Design Rule Check (DRC) — layout-level geometric constraint validation.

DRCRuleDeck dataclass

Configuration for DRC rules.

Source code in src/openlithohub/benchmark/compliance/drc.py
@dataclass
class DRCRuleDeck:
    """Configuration for DRC rules.

    Thresholds are in physical units (nanometers / square nanometers) and are
    consumed by check_drc together with its pixel_size_nm argument.
    """

    # Minimum allowed feature width, in nm.
    min_width_nm: float = 40.0
    # Minimum allowed spacing between features, in nm.
    min_spacing_nm: float = 40.0
    # Minimum allowed feature area, in nm^2.
    min_area_nm2: float = 100.0
    # Minimum allowed notch dimension, in nm.
    min_notch_nm: float = 30.0

DRCResult dataclass

Result of a Design Rule Check.

Source code in src/openlithohub/benchmark/compliance/drc.py
@dataclass
class DRCResult:
    """Result of a Design Rule Check.

    Produced by check_drc after running the width, spacing, area, and notch
    checks.
    """

    # True when no rule produced any violation.
    passed: bool
    # Total number of violations across all rules.
    violation_count: int
    # Per-violation detail records, concatenated across all checks
    # (record schema is defined by the individual _check_* helpers).
    violations: list[dict[str, float]]
    # Violation count per rule: 'min_width', 'min_spacing', 'min_area', 'notch'.
    rule_summary: dict[str, int] = field(default_factory=dict)

check_drc(mask, rule_deck='default', pixel_size_nm=1.0)

Run Design Rule Check on a mask layout.

Checks: minimum width, minimum spacing, minimum area, notch detection.

Source code in src/openlithohub/benchmark/compliance/drc.py
def check_drc(
    mask: torch.Tensor,
    rule_deck: str | DRCRuleDeck = "default",
    pixel_size_nm: float = 1.0,
) -> DRCResult:
    """Run Design Rule Check on a mask layout.

    Checks: minimum width, minimum spacing, minimum area, notch detection.

    Args:
        mask: Mask tensor; reduced to 2D via ensure_2d and binarized at 0.5.
        rule_deck: Named deck (unknown names fall back to the default deck)
            or an explicit DRCRuleDeck instance.
        pixel_size_nm: Physical pixel size for unit conversion.

    Returns:
        DRCResult with pass/fail status, violations, and a per-rule summary.
    """
    binary = (ensure_2d(mask) > 0.5).float()

    if isinstance(rule_deck, str):
        rules = _RULE_DECKS.get(rule_deck, _DEFAULT_RULES)
    else:
        rules = rule_deck

    # Each rule: (summary key, checker, threshold from the deck).
    checks = (
        ("min_width", _check_width, rules.min_width_nm),
        ("min_spacing", _check_spacing, rules.min_spacing_nm),
        ("min_area", _check_min_area, rules.min_area_nm2),
        ("notch", _check_notch, rules.min_notch_nm),
    )

    violations: list[dict[str, float]] = []
    rule_summary: dict[str, int] = {}
    for rule_name, checker, threshold in checks:
        found = checker(binary, threshold, pixel_size_nm)
        rule_summary[rule_name] = len(found)
        violations.extend(found)

    return DRCResult(
        passed=not violations,
        violation_count=len(violations),
        violations=violations,
        rule_summary=rule_summary,
    )

Report

openlithohub.benchmark.report

Evaluation report generation.

generate_report(metrics, output_format='table')

Generate a formatted evaluation report from computed metrics.

Parameters:

Name Type Description Default
metrics dict[str, Any]

Dictionary of metric names to values.

required
output_format str

'table' (rich terminal), 'json', or 'markdown'.

'table'

Returns:

Type Description
str

Formatted report string.

Source code in src/openlithohub/benchmark/report.py
def generate_report(
    metrics: dict[str, Any],
    output_format: str = "table",
) -> str:
    """Generate a formatted evaluation report from computed metrics.

    Args:
        metrics: Dictionary of metric names to values.
        output_format: 'table' (rich terminal), 'json', or 'markdown'.

    Returns:
        Formatted report string.
    """
    if output_format == "json":
        return json.dumps(metrics, indent=2)

    if output_format == "markdown":
        return _format_markdown(metrics)

    return _format_table(metrics)