First elements of the study
63
tests/test_colors_grid_plot.py
Normal file
@@ -0,0 +1,63 @@
"""Tests for the visualisation of the colors used."""

import csv
from pathlib import Path

import matplotlib

from lib.plots.colors_grid import build_hex_positions, load_used_colors, plot_colors_grid


matplotlib.use("Agg")


def write_csv(path: Path, headers: list[str], rows: list[list[str]]) -> None:
    """Write a simple CSV for test purposes."""
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("w", newline="") as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(headers)
        writer.writerows(rows)


def test_build_hex_positions() -> None:
    """Builds enough positions for every color."""
    positions = build_hex_positions(10, columns=4, spacing=1.0)
    assert len(positions) == 10
    assert positions[0] == (0.0, 0.0)
    assert positions[1][0] > positions[0][0]


def test_plot_colors_grid(tmp_path: Path) -> None:
    """Produces an image file with the colors used."""
    parts_path = tmp_path / "parts_filtered.csv"
    colors_path = tmp_path / "colors.csv"
    destination_path = tmp_path / "colors_grid.png"

    write_csv(
        parts_path,
        ["part_num", "color_rgb", "is_translucent", "set_id", "quantity_in_set", "is_spare"],
        [
            ["3001", "FFFFFF", "false", "1000", "2", "false"],
            ["3002", "000000", "true", "1000", "5", "false"],
            ["3003", "FF0000", "false", "1000", "1", "true"],
        ],
    )
    write_csv(
        colors_path,
        ["id", "name", "rgb", "is_trans", "num_parts", "num_sets", "y1", "y2"],
        [
            ["1", "White", "FFFFFF", "False", "0", "0", "0", "0"],
            ["2", "Black", "000000", "True", "0", "0", "0", "0"],
            ["3", "Red", "FF0000", "False", "0", "0", "0", "0"],
        ],
    )

    colors = load_used_colors(parts_path, colors_path)
    assert len(colors) == 3

    plot_colors_grid(parts_path, colors_path, destination_path)

    assert destination_path.exists()
    assert destination_path.stat().st_size > 0
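The hex-grid geometry these assertions pin down is simple enough to sketch. The following is a hypothetical implementation of build_hex_positions consistent with the test (first position at the origin, positions filled column-first); the real lib.plots.colors_grid may lay the grid out differently.

def build_hex_positions(count: int, columns: int, spacing: float) -> list[tuple[float, float]]:
    """Hypothetical sketch inferred only from the test's assertions."""
    positions = []
    for index in range(count):
        row, column = divmod(index, columns)
        # Shift odd rows by half a spacing to obtain the hexagonal packing;
        # 0.866 is roughly sqrt(3)/2, the vertical pitch of a hex grid.
        x_offset = spacing / 2 if row % 2 else 0.0
        positions.append((column * spacing + x_offset, row * spacing * 0.866))
    return positions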
91
tests/test_downloader.py
Normal file
@@ -0,0 +1,91 @@
"""Tests for the Rebrickable download module."""

import gzip
from pathlib import Path

import responses

from lib.rebrickable.downloader import (
    build_rebrickable_url,
    download_rebrickable_file,
    download_rebrickable_files,
)


def test_build_rebrickable_url() -> None:
    """Builds the full URL to Rebrickable."""
    assert build_rebrickable_url("themes.csv.gz") == (
        "https://cdn.rebrickable.com/media/downloads/themes.csv.gz"
    )


@responses.activate
def test_download_rebrickable_file(tmp_path: Path) -> None:
    """Downloads, saves and decompresses the compressed file."""
    file_name = "themes.csv.gz"
    uncompressed_content = b"compressed-data"
    compressed_body = gzip.compress(uncompressed_content)
    responses.add(
        responses.GET,
        build_rebrickable_url(file_name),
        body=compressed_body,
        status=200,
    )

    target_path = download_rebrickable_file(file_name, tmp_path)

    assert target_path == tmp_path / "themes.csv"
    assert target_path.read_bytes() == uncompressed_content
    assert not (tmp_path / file_name).exists()


@responses.activate
def test_download_skips_when_cache_is_fresh(tmp_path: Path) -> None:
    """Does not re-download a recent file and keeps its content."""
    file_name = "themes.csv.gz"
    cached_path = tmp_path / "themes.csv"
    cached_path.write_bytes(b"cached")

    target_path = download_rebrickable_file(file_name, tmp_path)

    assert target_path == cached_path
    assert target_path.read_bytes() == b"cached"
    assert not (tmp_path / file_name).exists()
    assert len(responses.calls) == 0


@responses.activate
def test_download_multiple_rebrickable_files(tmp_path: Path) -> None:
    """Downloads several compressed files and decompresses them."""
    file_names = [
        "inventories.csv.gz",
        "inventory_parts.csv.gz",
        "parts.csv.gz",
        "colors.csv.gz",
    ]
    compressed_bodies = {}
    for file_name in file_names:
        uncompressed_content = file_name.encode()
        compressed_body = gzip.compress(uncompressed_content)
        compressed_bodies[file_name] = compressed_body
        responses.add(
            responses.GET,
            build_rebrickable_url(file_name),
            body=compressed_body,
            status=200,
        )

    downloaded_paths = download_rebrickable_files(file_names, tmp_path)

    assert downloaded_paths == [
        tmp_path / "inventories.csv",
        tmp_path / "inventory_parts.csv",
        tmp_path / "parts.csv",
        tmp_path / "colors.csv",
    ]
    assert len(responses.calls) == len(file_names)
    for file_name in file_names:
        target_path = tmp_path / file_name
        decompressed_path = target_path.with_suffix("")
        assert decompressed_path.read_bytes() == file_name.encode()
        assert not target_path.exists()
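A minimal downloader consistent with these three tests could look like the sketch below. This is a hypothetical reconstruction, not the module's actual code: the real lib.rebrickable.downloader presumably also checks the cache age (the test name "skips_when_cache_is_fresh" suggests so), and requests is only assumed as the HTTP client because the tests intercept it with responses.

import gzip
from pathlib import Path

import requests

REBRICKABLE_CDN = "https://cdn.rebrickable.com/media/downloads/"


def build_rebrickable_url(file_name: str) -> str:
    return REBRICKABLE_CDN + file_name


def download_rebrickable_file(file_name: str, destination_dir: Path) -> Path:
    # The decompressed target drops the ".gz" suffix: themes.csv.gz -> themes.csv.
    target_path = destination_dir / Path(file_name).stem
    if target_path.exists():
        return target_path  # fresh cache: no HTTP call at all
    response = requests.get(build_rebrickable_url(file_name), timeout=30)
    response.raise_for_status()
    # Decompress in memory so the .gz archive never touches the disk,
    # which is why the tests can assert the archive file does not exist.
    target_path.write_bytes(gzip.decompress(response.content))
    return target_path


def download_rebrickable_files(file_names: list[str], destination_dir: Path) -> list[Path]:
    return [download_rebrickable_file(name, destination_dir) for name in file_names]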
77
tests/test_enrich_sets.py
Normal file
@@ -0,0 +1,77 @@
"""Tests for the enrichment of the filtered sets."""

from pathlib import Path

from lib.rebrickable.enrich_sets import (
    build_rebrickable_set_url,
    enrich_sets,
    extract_set_id,
    load_owned_set_ids,
    parse_set_collection_root,
    write_missing_sets_markdown,
)


def test_extract_set_id_removes_revision() -> None:
    """Removes the revision from the set_num identifier."""
    assert extract_set_id("75936-1") == "75936"


def test_build_rebrickable_set_url() -> None:
    """Builds the public Rebrickable URL from the set_num."""
    assert build_rebrickable_set_url("75936-1") == "https://rebrickable.com/sets/75936-1"


def test_parse_set_collection_root_empty_returns_none() -> None:
    """Returns None for a blank value."""
    assert parse_set_collection_root(" ") is None


def test_load_owned_set_ids_handles_missing_and_collects(tmp_path: Path) -> None:
    """Returns the sets present as directories, and an empty set when nothing exists."""
    missing_root = tmp_path / "absent"
    assert load_owned_set_ids(missing_root) == set()

    root = tmp_path / "collection"
    root.mkdir()
    (root / "75936").mkdir()
    (root / "75944").mkdir()
    assert load_owned_set_ids(root) == {"75936", "75944"}


def test_enrich_sets_adds_columns_and_collection(tmp_path: Path) -> None:
    """Enriches the CSV with set_id, URL and ownership."""
    source = tmp_path / "sets_filtered.csv"
    destination = tmp_path / "sets_enriched.csv"
    source.write_text(
        "set_num,name,year,theme_id\n"
        "75936-1,T. rex Rampage,2019,602\n"
        "10757-1,Raptor Rescue Truck,2018,620\n"
    )

    enrich_sets(source, destination, {"75936"})

    assert destination.read_text() == (
        "set_num,name,year,theme_id,set_id,rebrickable_url,in_collection\n"
        "75936-1,T. rex Rampage,2019,602,75936,https://rebrickable.com/sets/75936-1,true\n"
        "10757-1,Raptor Rescue Truck,2018,620,10757,https://rebrickable.com/sets/10757-1,false\n"
    )


def test_write_missing_sets_markdown(tmp_path: Path) -> None:
    """Builds a Markdown table of the sets not owned."""
    enriched = tmp_path / "sets_enriched.csv"
    markdown = tmp_path / "sets_missing.md"
    enriched.write_text(
        "set_num,name,year,theme_id,set_id,rebrickable_url,in_collection\n"
        "75936-1,T. rex Rampage,2019,602,75936,https://rebrickable.com/sets/75936-1,true\n"
        "10757-1,Raptor Rescue Truck,2018,620,10757,https://rebrickable.com/sets/10757-1,false\n"
    )

    write_missing_sets_markdown(enriched, markdown)

    assert markdown.read_text() == (
        "| set_id | year | name |\n"
        "| --- | --- | --- |\n"
        "| [10757](https://rebrickable.com/sets/10757-1) | 2018 | Raptor Rescue Truck |\n"
    )
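The first two tests pin down trivial helpers. Hypothetical one-liners consistent with them (the real module may validate its input more strictly):

def extract_set_id(set_num: str) -> str:
    # "75936-1" -> "75936": the revision suffix follows the last dash.
    return set_num.rsplit("-", 1)[0]


def build_rebrickable_set_url(set_num: str) -> str:
    return f"https://rebrickable.com/sets/{set_num}"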
37
tests/test_filter_sets.py
Normal file
@@ -0,0 +1,37 @@
"""Tests for filtering sets by theme."""

from pathlib import Path

import pytest

from lib.rebrickable.filter_sets import filter_sets_by_theme, parse_theme_ids


def test_parse_theme_ids_strips_and_validates() -> None:
    """Cleans up the list and rejects an empty value."""
    assert parse_theme_ids(" 274 , 602 ,620") == ["274", "602", "620"]
    with pytest.raises(ValueError):
        parse_theme_ids(" , , ")


def test_filter_sets_by_theme(tmp_path: Path) -> None:
    """Keeps only the sets of the targeted themes that have parts, and preserves the header."""
    source = tmp_path / "sets.csv"
    destination = tmp_path / "filtered.csv"
    overrides = tmp_path / "overrides.csv"
    source.write_text(
        "set_num,name,year,theme_id,num_parts,img_url\n"
        "75936,T. rex Rampage,2019,602,3120,https://example\n"
        "43221,100 Years of Disney Animation Icons,2023,710,0,https://example\n"
        "75944,Indominus rex vs. Ankylosaurus,2020,602,1000,https://example\n"
        "10757,Raptor Rescue Truck,2018,620,0,https://example\n"
    )
    overrides.write_text("set_num,num_parts\n75936,3121\n")

    filter_sets_by_theme(source, destination, ["602"], overrides)

    assert destination.read_text() == (
        "set_num,name,year,theme_id,num_parts,img_url\n"
        "75936,T. rex Rampage,2019,602,3121,https://example\n"
        "75944,Indominus rex vs. Ankylosaurus,2020,602,1000,https://example\n"
    )
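parse_theme_ids is fully specified by its test: strip each comma-separated item, drop blanks, and refuse an entirely empty list. A hypothetical implementation:

def parse_theme_ids(raw: str) -> list[str]:
    theme_ids = [item.strip() for item in raw.split(",") if item.strip()]
    if not theme_ids:
        raise ValueError("at least one theme id is required")
    return theme_ids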
144
tests/test_inventory_gaps.py
Normal file
@@ -0,0 +1,144 @@
"""Tests for the inventory gaps computed from parts_filtered.csv."""

import csv
from pathlib import Path

from lib.rebrickable.inventory_reconciliation import (
    compute_inventory_gaps,
    index_sets_by_num,
    write_inventory_gaps_csv,
    write_inventory_gaps_markdown,
)


def write_csv(path: Path, headers: list[str], rows: list[list[str]]) -> None:
    """Write a simple CSV for test purposes."""
    with path.open("w", newline="") as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(headers)
        writer.writerows(rows)


def test_compute_inventory_gaps_excludes_spares(tmp_path: Path) -> None:
    """Ignores spare parts and keeps only the sets with a gap."""
    sets_path = tmp_path / "sets_enriched.csv"
    parts_path = tmp_path / "parts_filtered.csv"
    write_csv(
        sets_path,
        ["set_num", "set_id", "num_parts", "in_collection"],
        [
            ["1000-1", "1000", "4", "true"],
            ["2000-1", "2000", "3", "false"],
            ["3000-1", "3000", "1", "true"],
        ],
    )
    write_csv(
        parts_path,
        ["part_num", "color_rgb", "is_translucent", "set_num", "set_id", "quantity_in_set", "is_spare"],
        [
            ["A", "AAAAAA", "false", "1000-1", "1000", "2", "false"],
            ["B", "BBBBBB", "false", "1000-1", "1000", "2", "false"],
            ["S", "SSSSSS", "false", "1000-1", "1000", "5", "true"],
            ["C", "CCCCCC", "false", "2000-1", "2000", "2", "false"],
            ["D", "DDDDDD", "false", "3000-1", "3000", "1", "false"],
        ],
    )

    gaps = compute_inventory_gaps(sets_path, parts_path)

    assert gaps == [
        {
            "set_num": "1000-1",
            "set_id": "1000",
            "expected_parts": 4,
            "inventory_parts": 9,
            "inventory_parts_non_spare": 4,
            "delta": 5,
            "delta_non_spare": 0,
            "in_collection": "true",
        },
        {
            "set_num": "2000-1",
            "set_id": "2000",
            "expected_parts": 3,
            "inventory_parts": 2,
            "inventory_parts_non_spare": 2,
            "delta": 1,
            "delta_non_spare": 1,
            "in_collection": "false",
        },
    ]


def test_write_inventory_gaps_csv(tmp_path: Path) -> None:
    """Serialises the gap report into a dedicated CSV."""
    destination_path = tmp_path / "inventory_gaps.csv"
    rows = [
        {
            "set_num": "2000-1",
            "set_id": "2000",
            "expected_parts": 3,
            "inventory_parts": 2,
            "inventory_parts_non_spare": 2,
            "delta": 1,
            "delta_non_spare": 1,
            "in_collection": "false",
        }
    ]

    write_inventory_gaps_csv(destination_path, rows)

    with destination_path.open() as csv_file:
        written_rows = list(csv.DictReader(csv_file))

    assert written_rows == [
        {
            "set_num": "2000-1",
            "set_id": "2000",
            "expected_parts": "3",
            "inventory_parts": "2",
            "inventory_parts_non_spare": "2",
            "delta": "1",
            "delta_non_spare": "1",
            "in_collection": "false",
        }
    ]


def test_write_inventory_gaps_markdown(tmp_path: Path) -> None:
    """Produces a Markdown table listing the sets with a gap."""
    destination_path = tmp_path / "inventory_gaps.md"
    gaps = [
        {
            "set_num": "2000-1",
            "set_id": "2000",
            "expected_parts": 3,
            "inventory_parts": 2,
            "inventory_parts_non_spare": 2,
            "delta": 1,
            "delta_non_spare": 1,
            "in_collection": "false",
        }
    ]
    sets = [
        {
            "set_num": "2000-1",
            "set_id": "2000",
            "num_parts": "3",
            "name": "Test Set",
            "year": "2020",
            "rebrickable_url": "https://rebrickable.com/sets/2000-1",
            "in_collection": "false",
        }
    ]

    write_inventory_gaps_markdown(destination_path, gaps, index_sets_by_num(sets))

    with destination_path.open() as markdown_file:
        content = markdown_file.read().splitlines()

    assert content[0].startswith("| set_id | name |")
    assert (
        "| [2000](https://rebrickable.com/sets/2000-1) | Test Set | 2020 | 1 | 1 | 3 | 2 | 2 | false | [PDF](https://www.lego.com/service/buildinginstructions/2000) |"
        in content
    )
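The expected dictionaries pin down the arithmetic: inventory_parts sums every quantity, the non-spare variant skips rows with is_spare set to "true", both deltas are absolute differences against the catalogue's num_parts, and sets with no gap at all (like 3000-1) are dropped. A hypothetical sketch of compute_inventory_gaps under those assumptions, not the module's actual code:

import csv
from pathlib import Path


def compute_inventory_gaps(sets_path: Path, parts_path: Path) -> list[dict]:
    with sets_path.open() as sets_file:
        sets = list(csv.DictReader(sets_file))
    totals: dict[str, int] = {}
    non_spare: dict[str, int] = {}
    with parts_path.open() as parts_file:
        for row in csv.DictReader(parts_file):
            quantity = int(row["quantity_in_set"])
            totals[row["set_num"]] = totals.get(row["set_num"], 0) + quantity
            if row["is_spare"] == "false":
                non_spare[row["set_num"]] = non_spare.get(row["set_num"], 0) + quantity
    gaps = []
    for entry in sets:
        expected = int(entry["num_parts"])
        inventory = totals.get(entry["set_num"], 0)
        inventory_non_spare = non_spare.get(entry["set_num"], 0)
        delta = abs(inventory - expected)
        delta_non_spare = abs(inventory_non_spare - expected)
        if delta == 0 and delta_non_spare == 0:
            continue  # the inventory matches the catalogue: nothing to report
        gaps.append(
            {
                "set_num": entry["set_num"],
                "set_id": entry["set_id"],
                "expected_parts": expected,
                "inventory_parts": inventory,
                "inventory_parts_non_spare": inventory_non_spare,
                "delta": delta,
                "delta_non_spare": delta_non_spare,
                "in_collection": entry["in_collection"],
            }
        )
    return gaps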
22
tests/test_milestones.py
Normal file
@@ -0,0 +1,22 @@
"""Tests for loading the configurable milestones."""

from pathlib import Path

from lib.milestones import load_milestones


def test_load_milestones_reads_csv(tmp_path: Path) -> None:
    """Loads the CSV and converts the year to an integer."""
    source = tmp_path / "milestones.csv"
    source.write_text(
        "year,description\n"
        "1993,Sortie du film Jurassic Park\n"
        "1997,Sortie du film The Lost World: Jurassic Park\n"
    )

    milestones = load_milestones(source)

    assert milestones == [
        {"year": 1993, "description": "Sortie du film Jurassic Park"},
        {"year": 1997, "description": "Sortie du film The Lost World: Jurassic Park"},
    ]
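load_milestones is fully specified by this test: read the CSV and cast the year. A hypothetical implementation:

import csv
from pathlib import Path


def load_milestones(source: Path) -> list[dict]:
    with source.open() as csv_file:
        return [
            {"year": int(row["year"]), "description": row["description"]}
            for row in csv.DictReader(csv_file)
        ]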
140
tests/test_parts_inventory.py
Normal file
@@ -0,0 +1,140 @@
"""Tests for building the parts_filtered.csv file."""

import csv
from pathlib import Path

from lib.rebrickable.parts_inventory import write_parts_filtered


def write_csv(path: Path, headers: list[str], rows: list[list[str]]) -> None:
    """Write a simple CSV for test purposes."""
    with path.open("w", newline="") as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(headers)
        writer.writerows(rows)


def test_write_parts_filtered(tmp_path: Path) -> None:
    """Assembles the parts per set using the latest inventory version."""
    sets_path = tmp_path / "sets_enriched.csv"
    inventories_path = tmp_path / "inventories.csv"
    inventory_parts_path = tmp_path / "inventory_parts.csv"
    colors_path = tmp_path / "colors.csv"
    inventory_minifigs_path = tmp_path / "inventory_minifigs.csv"
    minifigs_path = tmp_path / "minifigs.csv"
    destination_path = tmp_path / "parts_filtered.csv"

    write_csv(
        sets_path,
        ["set_num", "set_id", "name", "num_parts"],
        [
            ["1234-1", "1234", "Sample Set A", "9"],
            ["5678-1", "5678", "Sample Set B", "2"],
        ],
    )
    write_csv(
        inventories_path,
        ["id", "version", "set_num"],
        [
            ["1", "1", "1234-1"],
            ["2", "2", "1234-1"],
            ["3", "1", "5678-1"],
            ["4", "1", "fig-123"],
        ],
    )
    write_csv(
        inventory_parts_path,
        ["inventory_id", "part_num", "color_id", "quantity", "is_spare", "img_url"],
        [
            ["2", "3001", "1", "4", "False", ""],
            ["2", "3002", "2", "1", "True", ""],
            ["3", "3003", "3", "2", "False", ""],
            ["4", "mf-1", "2", "1", "False", ""],
            ["4", "mf-2", "3", "2", "False", ""],
        ],
    )
    write_csv(
        inventory_minifigs_path,
        ["inventory_id", "fig_num", "quantity"],
        [
            ["2", "fig-123", "1"],
        ],
    )
    write_csv(
        minifigs_path,
        ["fig_num", "name", "num_parts", "img_url"],
        [
            ["fig-123", "Sample Minifig", "2", ""],
        ],
    )
    write_csv(
        colors_path,
        ["id", "name", "rgb", "is_trans", "num_parts", "num_sets", "y1", "y2"],
        [
            ["1", "White", "FFFFFF", "False", "0", "0", "0", "0"],
            ["2", "Black", "000000", "True", "0", "0", "0", "0"],
            ["3", "Red", "FF0000", "False", "0", "0", "0", "0"],
        ],
    )

    write_parts_filtered(
        sets_path,
        inventories_path,
        inventory_parts_path,
        colors_path,
        inventory_minifigs_path,
        minifigs_path,
        destination_path,
    )

    with destination_path.open() as result_file:
        reader = csv.DictReader(result_file)
        rows = list(reader)

    assert rows == [
        {
            "part_num": "3001",
            "color_rgb": "FFFFFF",
            "is_translucent": "false",
            "set_num": "1234-1",
            "set_id": "1234",
            "quantity_in_set": "4",
            "is_spare": "false",
        },
        {
            "part_num": "3002",
            "color_rgb": "000000",
            "is_translucent": "true",
            "set_num": "1234-1",
            "set_id": "1234",
            "quantity_in_set": "1",
            "is_spare": "true",
        },
        {
            "part_num": "mf-1",
            "color_rgb": "000000",
            "is_translucent": "true",
            "set_num": "1234-1",
            "set_id": "1234",
            "quantity_in_set": "1",
            "is_spare": "false",
        },
        {
            "part_num": "mf-2",
            "color_rgb": "FF0000",
            "is_translucent": "false",
            "set_num": "1234-1",
            "set_id": "1234",
            "quantity_in_set": "2",
            "is_spare": "false",
        },
        {
            "part_num": "3003",
            "color_rgb": "FF0000",
            "is_translucent": "false",
            "set_num": "5678-1",
            "set_id": "5678",
            "quantity_in_set": "2",
            "is_spare": "false",
        },
    ]
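One detail worth noting: the expected rows come from inventory id "2" (version 2) for set 1234-1, never from id "1", so write_parts_filtered must keep only the highest inventory version per set_num. A hypothetical helper for that selection, inferred from the fixture and not taken from the module:

def latest_inventory_ids(inventories: list[dict]) -> dict[str, str]:
    """Map each set_num to the id of its highest-version inventory."""
    latest: dict[str, dict] = {}
    for inventory in inventories:
        current = latest.get(inventory["set_num"])
        if current is None or int(inventory["version"]) > int(current["version"]):
            latest[inventory["set_num"]] = inventory
    return {set_num: inv["id"] for set_num, inv in latest.items()}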
54
tests/test_parts_per_set_plot.py
Normal file
@@ -0,0 +1,54 @@
"""Tests for the average-parts-per-set plots."""

from pathlib import Path

import matplotlib

from lib.plots.parts_per_set import (
    compute_average_parts_per_set,
    compute_rolling_mean,
    plot_parts_per_set,
)


matplotlib.use("Agg")


def test_compute_average_parts_per_set() -> None:
    """Computes the yearly parts-per-set average."""
    rows = [
        {"year": "2020", "num_parts": "100"},
        {"year": "2020", "num_parts": "200"},
        {"year": "2021", "num_parts": "150"},
    ]

    series = compute_average_parts_per_set(rows)

    assert series == [(2020, 150.0), (2021, 150.0)]


def test_compute_rolling_mean() -> None:
    """Computes a two-year rolling mean."""
    series = [(2020, 100.0), (2021, 200.0), (2022, 300.0)]

    rolling = compute_rolling_mean(series, 2)

    assert rolling == [(2020, 0.0), (2021, 150.0), (2022, 250.0)]


def test_plot_parts_per_set_creates_figure(tmp_path: Path) -> None:
    """Generates the image file with the yearly and rolling averages."""
    enriched = tmp_path / "sets_enriched.csv"
    milestones = tmp_path / "milestones.csv"
    destination = tmp_path / "figures" / "step07" / "avg_parts_per_set.png"
    enriched.write_text(
        "set_num,name,year,theme_id,num_parts,img_url,set_id,rebrickable_url,in_collection\n"
        "75936-1,T. rex Rampage,2019,602,3120,https://example,75936,https://example,true\n"
        "75944-1,Indominus rex vs. Ankylosaurus,2020,602,1000,https://example,75944,https://example,false\n"
    )
    milestones.write_text("year,description\n2019,LEGO Jurassic World: Legend of Isla Nublar\n")

    plot_parts_per_set(enriched, milestones, destination)

    assert destination.exists()
    assert destination.stat().st_size > 0
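The asserted output fixes an edge-case convention: years without a full window are reported as 0.0 rather than dropped. A hypothetical compute_rolling_mean matching that behaviour:

def compute_rolling_mean(series: list[tuple[int, float]], window: int) -> list[tuple[int, float]]:
    rolling = []
    for index, (year, _) in enumerate(series):
        if index + 1 < window:
            # Not enough history yet: the test expects 0.0 here, not a gap.
            rolling.append((year, 0.0))
            continue
        values = [value for _, value in series[index + 1 - window : index + 1]]
        rolling.append((year, sum(values) / window))
    return rolling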
85
tests/test_parts_stats.py
Normal file
@@ -0,0 +1,85 @@
"""Tests for the simple statistics on the filtered parts."""

import csv
from pathlib import Path

from lib.rebrickable.parts_stats import build_stats, read_rows, write_parts_stats


def write_csv(path: Path, headers: list[str], rows: list[list[str]]) -> None:
    """Write a simple CSV for test purposes."""
    with path.open("w", newline="") as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(headers)
        writer.writerows(rows)


def test_build_stats(tmp_path: Path) -> None:
    """Computes the main statistics, excluding spare parts."""
    parts_path = tmp_path / "parts_filtered.csv"
    sets_path = tmp_path / "sets_enriched.csv"
    stats_path = tmp_path / "stats.csv"
    write_csv(
        parts_path,
        ["part_num", "color_rgb", "is_translucent", "set_num", "set_id", "quantity_in_set", "is_spare"],
        [
            ["3001", "FFFFFF", "false", "1000-1", "1000", "2", "false"],
            ["3001", "FFFFFF", "false", "2000-1", "2000", "1", "false"],
            ["3002", "000000", "true", "1000-1", "1000", "5", "false"],
            ["3003", "FF0000", "false", "1000-1", "1000", "1", "true"],
        ],
    )
    write_csv(
        sets_path,
        ["set_num", "set_id", "num_parts", "in_collection"],
        [
            ["1000-1", "1000", "8", "true"],
            ["2000-1", "2000", "1", "false"],
        ],
    )
    write_csv(
        stats_path,
        ["libelle", "valeur"],
        [
            ["Total de pièces pour les thèmes filtrés", "9"],
        ],
    )

    stats = build_stats(read_rows(parts_path), sets_path, parts_path, stats_path)

    assert stats == [
        ("Total de variations de pièces (hors rechanges)", "2"),
        (
            "Pièce la moins utilisée (référence + couleur)",
            "3001 / FFFFFF / false (3)",
        ),
        (
            "Pièce la plus commune (référence + couleur)",
            "3002 / 000000 / true (5)",
        ),
        ("Total de couleurs utilisées (hors rechanges)", "2"),
        ("Total de pièces hors rechanges", "8"),
        ("Ecart total catalogue (stats) - inventaire (hors rechanges)", "1"),
        ("Nombre de sets en écart inventaire/catalogue", "0"),
        ("Ecart maximal inventaire/catalogue", "none (0)"),
    ]


def test_write_parts_stats(tmp_path: Path) -> None:
    """Writes a statistics CSV."""
    destination_path = tmp_path / "parts_stats.csv"
    stats = [
        ("A", "1"),
        ("B", "2"),
    ]

    write_parts_stats(destination_path, stats)

    with destination_path.open() as csv_file:
        rows = list(csv.reader(csv_file))

    assert rows == [
        ["libelle", "valeur"],
        ["A", "1"],
        ["B", "2"],
    ]
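The second test fully specifies the writer's output format: a "libelle,valeur" header followed by one row per statistic. A hypothetical write_parts_stats matching it:

import csv
from pathlib import Path


def write_parts_stats(destination_path: Path, stats: list[tuple[str, str]]) -> None:
    with destination_path.open("w", newline="") as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(["libelle", "valeur"])
        writer.writerows(stats)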
60
tests/test_sets_per_year_plot.py
Normal file
@@ -0,0 +1,60 @@
"""Tests for the sets-per-year chart."""

from pathlib import Path

import matplotlib

from lib.plots.sets_per_year import (
    compute_parts_per_year,
    compute_sets_per_year,
    plot_sets_per_year,
)


matplotlib.use("Agg")


def test_compute_sets_per_year_counts_and_sorts() -> None:
    """Counts the sets per year and returns a sorted list."""
    rows = [
        {"year": "2020"},
        {"year": "2019"},
        {"year": "2020"},
    ]

    series = compute_sets_per_year(rows)

    assert series == [(2019, 1), (2020, 2)]


def test_compute_parts_per_year_sums_and_sorts() -> None:
    """Sums the parts per year and returns a sorted list."""
    rows = [
        {"year": "2020", "num_parts": "10"},
        {"year": "2019", "num_parts": "5"},
        {"year": "2020", "num_parts": "1"},
    ]

    series = compute_parts_per_year(rows)

    assert series == [(2019, 5), (2020, 11)]


def test_plot_sets_per_year_creates_figure(tmp_path: Path) -> None:
    """Generates an image file with the provided milestones."""
    enriched = tmp_path / "sets_enriched.csv"
    milestones = tmp_path / "milestones.csv"
    destination = tmp_path / "figures" / "step07" / "sets_per_year.png"
    enriched.write_text(
        "set_num,name,year,theme_id,num_parts,img_url,set_id,rebrickable_url,in_collection\n"
        "75936-1,T. rex Rampage,2019,602,3120,https://example,75936,https://example,true\n"
        "75944-1,Indominus rex vs. Ankylosaurus,2020,602,1000,https://example,75944,https://example,false\n"
    )
    milestones.write_text(
        "year,description\n"
        "2019,Diffusion LEGO Jurassic World: Legend of Isla Nublar\n"
    )

    plot_sets_per_year(enriched, milestones, destination)

    assert destination.exists()
    assert destination.stat().st_size > 0
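Both aggregations reduce to a Counter over the year column. A hypothetical sketch consistent with the asserted series (sorted ascending by year):

from collections import Counter


def compute_sets_per_year(rows: list[dict]) -> list[tuple[int, int]]:
    return sorted(Counter(int(row["year"]) for row in rows).items())


def compute_parts_per_year(rows: list[dict]) -> list[tuple[int, int]]:
    totals: Counter = Counter()
    for row in rows:
        totals[int(row["year"])] += int(row["num_parts"])
    return sorted(totals.items())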
83
tests/test_stats.py
Normal file
@@ -0,0 +1,83 @@
"""Tests for the statistics computed on the filtered LEGO sets."""

from lib.rebrickable.stats import compute_basic_stats, write_stats_csv


def test_compute_basic_stats_returns_expected_values(tmp_path) -> None:
    """Computes the main statistics on a controlled sample."""
    themes = [
        {"id": "602", "name": "Jurassic World", "parent_id": ""},
        {"id": "274", "name": "Jurassic Park III", "parent_id": "273"},
    ]
    all_sets = [
        {"set_num": "123-1", "name": "A", "year": "2020", "theme_id": "602", "num_parts": "100", "img_url": ""},
        {"set_num": "124-1", "name": "B", "year": "2021", "theme_id": "602", "num_parts": "200", "img_url": ""},
        {"set_num": "125-1", "name": "C", "year": "2021", "theme_id": "274", "num_parts": "300", "img_url": ""},
    ]
    filtered_sets = [
        {"set_num": "123-1", "name": "A", "year": "2020", "theme_id": "602", "num_parts": "100", "img_url": ""},
        {"set_num": "124-1", "name": "B", "year": "2021", "theme_id": "602", "num_parts": "200", "img_url": ""},
    ]
    enriched_sets = [
        {
            "set_num": "123-1",
            "name": "A",
            "year": "2020",
            "theme_id": "602",
            "num_parts": "100",
            "img_url": "",
            "set_id": "123",
            "rebrickable_url": "",
            "in_collection": "true",
        },
        {
            "set_num": "124-1",
            "name": "B",
            "year": "2021",
            "theme_id": "602",
            "num_parts": "200",
            "img_url": "",
            "set_id": "124",
            "rebrickable_url": "",
            "in_collection": "false",
        },
    ]

    stats = compute_basic_stats(themes, all_sets, filtered_sets, enriched_sets)

    assert stats == [
        ("Nombre total de sets (catalogue complet)", "3"),
        ("Nombre total de thèmes (catalogue complet)", "2"),
        ("Nombre de sets après filtrage (thèmes ciblés)", "2"),
        ("Nombre moyen de sets par thème (catalogue complet)", "1.50"),
        ("Pourcentage des sets filtrés vs total", "66.67%"),
        ("Taux de possession (thèmes filtrés)", "50.00%"),
        ("Sets dans la collection", "1"),
        ("Sets manquants pour la collection", "1"),
        ("Nombre moyen de pièces par set (thèmes filtrés)", "150.00"),
        ("Médiane de pièces par set (thèmes filtrés)", "150.00"),
        ("Nombre moyen de sets commercialisés par an (thèmes filtrés)", "1.00"),
        ("Total de pièces pour les thèmes filtrés", "300"),
        ("Total de pièces des sets possédés", "100"),
        ("Pourcentage de pièces possédées (thèmes filtrés)", "33.33%"),
        ("Nombre de thèmes filtrés", "1"),
        ("Première année de sortie (thèmes filtrés)", "2020"),
        ("Dernière année de sortie (thèmes filtrés)", "2021"),
        ("Année la plus prolifique (thèmes filtrés)", "2020 (1 sets)"),
        ("Set avec le plus de pièces (thèmes filtrés)", "124-1 - B (200 pièces)"),
        ("Set avec le moins de pièces (thèmes filtrés)", "123-1 - A (100 pièces)"),
        ("Set le plus ancien (thèmes filtrés)", "123-1 - A (2020)"),
        ("Set le plus récent (thèmes filtrés)", "124-1 - B (2021)"),
        ("Nombre moyen de pièces des sets possédés", "100.00"),
        ("Nombre moyen de pièces des sets manquants", "200.00"),
    ]


def test_write_stats_csv_outputs_two_columns(tmp_path) -> None:
    """Writes a simple CSV with label and value."""
    destination = tmp_path / "stats.csv"
    stats = [("A", "1"), ("B", "2")]

    write_stats_csv(destination, stats)

    assert destination.read_text() == "libelle,valeur\nA,1\nB,2\n"
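Many of the asserted values are just formatted ratios: 2 of 3 sets gives "66.67%", and 3 sets across 2 themes gives "1.50". Hypothetical formatting helpers showing the two-decimal convention the assertions imply (compute_basic_stats itself is much larger):

def format_ratio(part: int, whole: int) -> str:
    # e.g. format_ratio(2, 3) == "66.67%"
    return f"{part / whole * 100:.2f}%"


def format_mean(total: float, count: int) -> str:
    # e.g. format_mean(3, 2) == "1.50"
    return f"{total / count:.2f}"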