"""
Step 1b: Download and extract the llama.cpp Vulkan prebuilt binary.
Outputs: LLAMA_INSTALL=DONE
"""
import os
import sys
import tempfile
import urllib.error
import urllib.request
import zipfile
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from _ocr_cfg import OCR_DIR

# Guard clause: abort immediately if the shared config could not resolve
# a working directory — nothing below makes sense without it.
if not OCR_DIR:
    print("ERROR: OCR_DIR not resolved. Run preflight_workdir.py first.")
    raise SystemExit(1)

# Release tag to fetch. Update this to the newest tag listed at
# https://github.com/ggml-org/llama.cpp/releases/latest
TAG = "b8400"

llama_dir = os.path.join(OCR_DIR, "llama.cpp")
url = (
    "https://github.com/ggml-org/llama.cpp/releases/download/"
    f"{TAG}/llama-{TAG}-bin-win-vulkan-x64.zip"
)

print(f"Downloading llama.cpp {TAG} ...")
zip_path = os.path.join(tempfile.gettempdir(), "llama-vulkan.zip")
urllib.request.urlretrieve(url, zip_path)

os.makedirs(llama_dir, exist_ok=True)
with zipfile.ZipFile(zip_path, "r") as zf:
    zf.extractall(llama_dir)
os.remove(zip_path)
print("LLAMA_INSTALL=DONE")
