def run_benchmark_case(
    *,
    manager: ModelManager,
    audio_path: Path,
    model: str,
    language: str,
    cpu_threads: int,
    num_workers: int,
    beam_size: int,
    vad_filter: bool,
) -> BenchmarkRecord:
    """Run one transcription benchmark and package the measurements.

    Wall-clock and process CPU time are sampled immediately around the
    ``manager.transcribe`` call; finer-grained load/transcribe timings are
    taken from the ``timing`` mapping returned by the manager itself.

    Returns:
        A ``BenchmarkRecord`` echoing the configuration plus the measured
        wall/CPU seconds (rounded), CPU utilization percentage, the
        manager-reported timings, RSS sample, and a preview of the text.
    """
    runtime_options = RuntimeOptions(
        preset=model,
        cpu_threads=cpu_threads,
        num_workers=num_workers,
    )
    transcribe_options = normalize_options(
        overrides={
            "language": language,
            "beam_size": beam_size,
            "best_of": 1,
            "vad_filter": vad_filter,
            "condition_on_previous_text": False,
        }
    )

    # Sample both clocks as tightly around the transcription as possible.
    cpu_before = _cpu_seconds()
    wall_before = time.perf_counter()
    result, timing = manager.transcribe(runtime_options, str(audio_path), transcribe_options)
    wall_elapsed = time.perf_counter() - wall_before
    cpu_elapsed = _cpu_seconds() - cpu_before

    # Guard the ratio against a zero wall time (degenerate/instant run).
    if wall_elapsed > 0:
        utilization_pct = cpu_elapsed / wall_elapsed * 100.0
    else:
        utilization_pct = 0.0

    return BenchmarkRecord(
        audio_path=str(audio_path),
        model=model,
        language=language,
        cpu_threads=cpu_threads,
        num_workers=num_workers,
        beam_size=beam_size,
        vad_filter=vad_filter,
        wall_seconds=round(wall_elapsed, 3),
        cpu_seconds=round(cpu_elapsed, 3),
        cpu_percent=round(utilization_pct, 1),
        load_seconds=float(timing["load_seconds"]),
        transcribe_seconds=float(timing["transcribe_seconds"]),
        total_seconds=float(timing["total_seconds"]),
        cache_hit=bool(timing["cache_hit"]),
        # NOTE(review): current_rss_bytes() is sampled once here, after the
        # run — whether it reports a true peak or just the current RSS
        # depends on its implementation; confirm against its definition.
        peak_rss_bytes=current_rss_bytes(),
        text_preview=_preview(result.text),
    )