# etude_lego_jurassic_world/tests/test_resources.py
"""Tests des outils de téléchargement de ressources."""
from pathlib import Path

from lib.filesystem import ensure_parent_dir
from lib.rebrickable.resources import (
    add_part_img_urls,
    build_download_plan,
    build_part_img_lookup,
    download_resources,
    sanitize_name,
    write_minifigs_by_set_with_images,
)
from lib.rebrickable.stats import read_rows
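

# The tests below drive the module through two injected callables. Their
# shapes are inferred from how this file uses them (a sketch of the expected
# contract, not the library's documented API):
#   fetcher(part_num: str) -> str             # returns an image URL for the part
#   downloader(url: str, path: Path) -> bool  # writes the file, True on success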


def test_build_part_img_lookup_calls_fetcher_once_per_part() -> None:
    """Builds a lookup by calling the fetcher once per unique part reference."""
    calls: list[str] = []

    def fetcher(part_num: str) -> str:
        calls.append(part_num)
        return f"url-{part_num}"

    lookup = build_part_img_lookup(["p1", "p2", "p1"], fetcher, delay_seconds=0)
    assert lookup == {"p1": "url-p1", "p2": "url-p2"}
    assert calls == ["p1", "p2"]
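

# A hedged companion case: this assumes build_part_img_lookup accepts an
# empty iterable and returns an empty mapping without invoking the fetcher,
# which the test above implies but does not prove.
def test_build_part_img_lookup_empty_input() -> None:
    """Sketch: an empty part list should yield an empty lookup and no calls."""
    calls: list[str] = []

    def fetcher(part_num: str) -> str:
        calls.append(part_num)
        return f"url-{part_num}"

    assert build_part_img_lookup([], fetcher, delay_seconds=0) == {}
    assert calls == []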


def test_add_part_img_urls_and_write(tmp_path: Path) -> None:
    """Adds the head-image URLs and rewrites minifigs_by_set."""
    rows = [
        {"set_num": "123-1", "part_num": "p1", "known_character": "Alice", "fig_num": "f1", "gender": "female"},
    ]
    lookup = {"p1": "http://img/p1.jpg"}
    enriched = add_part_img_urls(rows, lookup)
    destination = tmp_path / "minifigs_by_set.csv"
    write_minifigs_by_set_with_images(destination, enriched)
    assert read_rows(destination) == [
        {
            "set_num": "123-1",
            "part_num": "p1",
            "known_character": "Alice",
            "fig_num": "f1",
            "gender": "female",
            "part_img_url": "http://img/p1.jpg",
        }
    ]
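    # The exact dict asserted above implies the writer appends part_img_url
    # as a new column after the five original minifigs_by_set fields.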


def test_build_download_plan_and_download(tmp_path: Path) -> None:
    """Builds the plan and downloads the binaries through a stub downloader."""
    sets_rows = [
        {"set_num": "123-1", "set_id": "123", "img_url": "http://set.img", "name": "A", "year": "2020"},
    ]
    minifigs_rows = [
        {"set_num": "123-1", "part_num": "p1", "known_character": "Bob", "fig_num": "fig-1", "gender": "male", "part_img_url": "http://head.img"},
    ]
    minifigs_catalog = {"fig-1": {"img_url": "http://fig.img"}}
    base_dir = tmp_path / "resources"
    plan = build_download_plan(sets_rows, minifigs_rows, minifigs_catalog, base_dir)
    downloaded: list[tuple[str, Path]] = []

    def downloader(url: str, path: Path) -> bool:
        downloaded.append((url, path))
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_bytes(b"data")
        return True

    download_resources(plan, downloader, delay_seconds=0, log_path=tmp_path / "log.csv")
    assert downloaded == [
        ("http://set.img", base_dir / "123" / "set.jpg"),
        ("http://fig.img", base_dir / "123" / "Bob" / "minifig.jpg"),
        ("http://head.img", base_dir / "123" / "Bob" / "head.jpg"),
    ]
    assert (base_dir / "123" / "Bob" / "head.jpg").exists()
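    # The asserted order documents the plan layout: the set image first, then
    # the minifig, then the head shot, all under resources/<set_id>/<character>/.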


def test_download_resources_duplicates_from_cache(tmp_path: Path) -> None:
    """Duplicates files already downloaded for other sets."""
    plan = [
        {"url": "http://same.img", "path": tmp_path / "resources" / "111" / "set.jpg"},
        {"url": "http://same.img", "path": tmp_path / "resources" / "222" / "set.jpg"},
    ]
    downloads: list[tuple[str, Path]] = []

    def downloader(url: str, path: Path) -> bool:
        downloads.append((url, path))
        ensure_parent_dir(path)
        path.write_bytes(b"img")
        return True

    download_resources(plan, downloader, delay_seconds=0, log_path=tmp_path / "log.csv")
    assert downloads == [("http://same.img", tmp_path / "resources" / "111" / "set.jpg")]
    assert (tmp_path / "resources" / "222" / "set.jpg").exists()
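    # Only the first URL reaches the downloader; the second destination is
    # populated by copying the already-fetched file, so it exists on disk
    # without a second network call.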


def test_sanitize_name_handles_special_chars() -> None:
    """Cleans names by removing special characters."""
    assert sanitize_name("Owen Grady") == "Owen_Grady"
    assert sanitize_name("Kayla-Watts!") == "Kayla_Watts"
    assert sanitize_name("") == "Unknown"