# scripts/run_tree_models.py
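# Trains lightweight tree ensembles (RandomForest and GradientBoosting) on lagged
# weather features for several forecast horizons, then writes metric tables (CSV)
# and validation figures (PNG) under this doc's data/ and figures/ directories.
# Continuous targets (temperature, wind_speed) are scored with MAE/RMSE; rain is
# binarized (rain_rate > 0) and scored with F1, Brier and average precision.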
from __future__ import annotations
from pathlib import Path
import sys
from typing import Iterable, Sequence
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.ensemble import (
    RandomForestRegressor,
    RandomForestClassifier,
    GradientBoostingRegressor,
    GradientBoostingClassifier,
)
from sklearn.metrics import (
mean_absolute_error,
mean_squared_error,
f1_score,
brier_score_loss,
average_precision_score,
)
PROJECT_ROOT = Path(__file__).resolve().parents[3]
if str(PROJECT_ROOT) not in sys.path:
sys.path.insert(0, str(PROJECT_ROOT))
from meteo.dataset import load_raw_csv
from model.features import build_feature_dataframe, FeatureSpec, _steps_from_minutes
from model.splits import chronological_split
CSV_PATH = Path("data/weather_minutely.csv")
DOC_DIR = Path(__file__).resolve().parent.parent
DATA_DIR = DOC_DIR / "data"
FIG_DIR = DOC_DIR / "figures"
HORIZONS_MINUTES: tuple[int, ...] = (10, 60, 360, 1440)
CONTINUOUS_TARGETS: tuple[str, ...] = ("temperature", "wind_speed")
RAIN_TARGET: str = "rain_rate"
DEFAULT_LAGS_BY_COL: dict[str, Sequence[int]] = {
"temperature": (10, 20, 30),
"wind_speed": (10, 20, 30),
"rain_rate": (10, 20, 30),
"humidity": (10, 20, 30),
"pressure": (10, 20, 30),
"illuminance": (10, 20, 30),
"wind_direction": (10, 20, 30),
"sun_elevation": (10, 20, 30),
}
USE_CORR_FILTER = True
CORR_THRESHOLD = 0.2
CORR_PATH = Path("docs/05 - Corrélations binaires avancées/data/correlation_matrix_lagged.csv")
LAG_MATRIX_PATH = Path("docs/05 - Corrélations binaires avancées/data/lag_matrix_minutes.csv")
TRAIN_SUBSAMPLE_STEP = 10  # keep 1 row out of 10 to speed up tree training
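# Target alignment helper. Assuming _steps_from_minutes(h, f) simply returns
# h // f steps, a 60 min horizon at a 10 min base frequency shifts the target
# back by 6 rows, so each row's "target" column holds the value observed 60 min
# later; rows whose shifted target runs past the end of the series are dropped
# together with any rows containing NaN features.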
def _align_target(
df: pd.DataFrame,
target_col: str,
horizon_minutes: int,
base_freq_minutes: int = 10,
) -> tuple[pd.DataFrame, pd.Series]:
steps = _steps_from_minutes(horizon_minutes, base_freq_minutes)
y = df[target_col].shift(-steps)
X_full = df.drop(columns=[target_col])
X = X_full.select_dtypes(include=["number", "bool"])
aligned = pd.concat([X, y.rename("target")], axis=1).dropna()
return aligned.drop(columns=["target"]), aligned["target"]
def _regression_scores(y_true: np.ndarray, y_pred: np.ndarray) -> dict[str, float]:
return {
"mae": float(mean_absolute_error(y_true, y_pred)),
"rmse": float(np.sqrt(mean_squared_error(y_true, y_pred))),
}
def _classification_scores(y_true: np.ndarray, proba: np.ndarray, threshold: float = 0.5) -> dict[str, float]:
y_pred = (proba >= threshold).astype(int)
return {
"f1": float(f1_score(y_true, y_pred, zero_division=0)),
"brier": float(brier_score_loss(y_true, proba)),
"ap": float(average_precision_score(y_true, proba)),
}
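# The two CSVs below are expected to come from the "Corrélations binaires
# avancées" analysis: a square signed correlation matrix and a matrix of optimal
# lags in minutes, both indexed and labelled by variable name. This layout is
# assumed from the read_csv(index_col=0) / .loc[row, col] usage; if either file
# is absent, the script falls back to the default lags and the full feature set.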
def _load_correlation_and_lag() -> tuple[pd.DataFrame | None, pd.DataFrame | None]:
corr_df = pd.read_csv(CORR_PATH, index_col=0) if CORR_PATH.exists() else None
lag_df = pd.read_csv(LAG_MATRIX_PATH, index_col=0) if LAG_MATRIX_PATH.exists() else None
return corr_df, lag_df
def _select_features_from_corr(
corr_df: pd.DataFrame | None,
targets: Sequence[str],
threshold: float,
) -> set[str]:
if corr_df is None:
return set()
selected: set[str] = set()
for target in targets:
if target not in corr_df.columns:
continue
corrs = corr_df[target].drop(labels=[target], errors="ignore")
strong = corrs[corrs.abs() >= threshold]
selected.update(strong.index.tolist())
return selected
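# For each selected column, the lag list starts from DEFAULT_LAGS_BY_COL and is
# extended with the absolute optimal lag (rounded to the nearest minute) taken
# from the lag matrix, but only for variable pairs whose correlation clears
# CORR_THRESHOLD. A zero or missing lag contributes nothing.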
def _build_lags_from_matrices(
lag_df: pd.DataFrame | None,
corr_df: pd.DataFrame | None,
selected_cols: Iterable[str],
default_lags: dict[str, Sequence[int]],
threshold: float,
) -> dict[str, Sequence[int]]:
mapping: dict[str, Sequence[int]] = {}
for col in selected_cols:
base = list(default_lags.get(col, (10, 20, 30)))
extra: set[int] = set()
if lag_df is not None and corr_df is not None and col in lag_df.index:
corrs = corr_df.loc[col]
for tgt, corr_val in corrs.items():
if tgt == col:
continue
                if pd.isna(corr_val) or abs(corr_val) < threshold:
                    continue
lag_val = lag_df.loc[col, tgt]
if pd.notna(lag_val) and lag_val != 0:
extra.add(int(abs(round(float(lag_val)))))
merged = sorted({*base, *extra})
mapping[col] = merged
return mapping
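# Regression benchmark: for every continuous target and horizon, a small
# RandomForestRegressor (25 trees, depth 8, 25% bootstrap samples) and a
# GradientBoostingRegressor (50 stages, learning rate 0.08) are fit on the
# (subsampled) training split and scored on validation and test. The
# hyperparameters are kept deliberately small so the full grid of
# targets x horizons stays fast.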
def run_regression_models(train_df: pd.DataFrame, val_df: pd.DataFrame, test_df: pd.DataFrame) -> pd.DataFrame:
rows: list[dict[str, object]] = []
models = [
("rf", RandomForestRegressor(
n_estimators=25,
max_depth=8,
min_samples_leaf=3,
max_features="sqrt",
n_jobs=-1,
random_state=42,
max_samples=0.25,
)),
("gbrt", GradientBoostingRegressor(
n_estimators=50,
learning_rate=0.08,
max_depth=3,
subsample=0.8,
random_state=42,
)),
]
for target_col in CONTINUOUS_TARGETS:
for horizon in HORIZONS_MINUTES:
X_train, y_train = _align_target(train_df, target_col, horizon)
X_val, y_val = _align_target(val_df, target_col, horizon)
X_test, y_test = _align_target(test_df, target_col, horizon)
if y_train.empty or y_val.empty or y_test.empty:
continue
for model_name, model in models:
model.fit(X_train, y_train)
y_val_pred = model.predict(X_val)
y_test_pred = model.predict(X_test)
val_scores = _regression_scores(y_val, y_val_pred)
test_scores = _regression_scores(y_test, y_test_pred)
rows.append(
{
"target": target_col,
"horizon_min": horizon,
"model": model_name,
"split": "validation",
**val_scores,
}
)
rows.append(
{
"target": target_col,
"horizon_min": horizon,
"model": model_name,
"split": "test",
**test_scores,
}
)
return pd.DataFrame(rows)
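# Rain benchmark: rain_rate at the horizon is binarized (> 0 -> rain) and the
# same two ensemble families are used as classifiers. class_weight="balanced"
# compensates for class imbalance (rain presumably being the minority class);
# scores are computed from the predicted probability of the positive class
# (F1 at a 0.5 threshold, Brier, average precision).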
def run_rain_models(train_df: pd.DataFrame, val_df: pd.DataFrame, test_df: pd.DataFrame) -> pd.DataFrame:
rows: list[dict[str, object]] = []
models = [
("rf", RandomForestClassifier(
n_estimators=40,
max_depth=8,
min_samples_leaf=3,
max_features="sqrt",
n_jobs=-1,
random_state=42,
class_weight="balanced",
max_samples=0.25,
)),
("gbrt", GradientBoostingClassifier(
n_estimators=50,
learning_rate=0.08,
max_depth=3,
subsample=0.8,
random_state=42,
)),
]
target_col = RAIN_TARGET
for horizon in HORIZONS_MINUTES:
X_train, y_train = _align_target(train_df, target_col, horizon)
X_val, y_val = _align_target(val_df, target_col, horizon)
X_test, y_test = _align_target(test_df, target_col, horizon)
y_train_bin = (y_train > 0).astype(int)
y_val_bin = (y_val > 0).astype(int)
y_test_bin = (y_test > 0).astype(int)
if y_train_bin.empty or y_val_bin.empty or y_test_bin.empty:
continue
for model_name, model in models:
model.fit(X_train, y_train_bin)
proba_val = model.predict_proba(X_val)[:, 1]
proba_test = model.predict_proba(X_test)[:, 1]
val_scores = _classification_scores(y_val_bin, proba_val)
test_scores = _classification_scores(y_test_bin, proba_test)
rows.append(
{
"target": "rain_binary",
"horizon_min": horizon,
"model": model_name,
"split": "validation",
**val_scores,
}
)
rows.append(
{
"target": "rain_binary",
"horizon_min": horizon,
"model": model_name,
"split": "test",
**test_scores,
}
)
return pd.DataFrame(rows)
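# Figure: one panel per continuous target, validation MAE as a function of the
# forecast horizon, one curve per model.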
def plot_regression_mae(reg_df: pd.DataFrame, output_path: Path) -> None:
output_path.parent.mkdir(parents=True, exist_ok=True)
df = reg_df[reg_df["split"] == "validation"]
targets = df["target"].unique()
models = df["model"].unique()
fig, axes = plt.subplots(len(targets), 1, figsize=(8, 4 * len(targets)), sharex=True)
if len(targets) == 1:
axes = [axes]
for ax, target in zip(axes, targets):
sub = df[df["target"] == target]
for model in models:
line = sub[sub["model"] == model].sort_values("horizon_min")
ax.plot(line["horizon_min"], line["mae"], marker="o", label=model)
ax.set_title(f"MAE {target} (validation)")
ax.set_ylabel("MAE")
ax.grid(True, linestyle=":", alpha=0.4)
axes[-1].set_xlabel("Horizon (minutes)")
axes[0].legend()
fig.tight_layout()
fig.savefig(output_path, dpi=150)
plt.close(fig)
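# Figure: two stacked panels (F1 on top, Brier below) showing validation scores
# for the rain classifiers as a function of the forecast horizon.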
def plot_rain_f1_brier(rain_df: pd.DataFrame, output_path: Path) -> None:
output_path.parent.mkdir(parents=True, exist_ok=True)
df = rain_df[rain_df["split"] == "validation"]
models = df["model"].unique()
fig, axes = plt.subplots(2, 1, figsize=(8, 6), sharex=True)
for metric, ax in zip(("f1", "brier"), axes):
for model in models:
line = df[df["model"] == model].sort_values("horizon_min")
ax.plot(line["horizon_min"], line[metric], marker="o", label=model)
ax.set_title(f"{metric.upper()} pluie (validation)" if metric == "f1" else "Brier pluie (validation)")
ax.set_ylabel(metric.upper() if metric == "f1" else "Brier")
ax.grid(True, linestyle=":", alpha=0.4)
axes[-1].set_xlabel("Horizon (minutes)")
axes[0].legend()
fig.tight_layout()
fig.savefig(output_path, dpi=150)
plt.close(fig)
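# End-to-end pipeline: load the raw CSV, optionally restrict the feature set via
# the correlation matrix, derive per-column lags, build the lagged feature frame
# (build_feature_dataframe / FeatureSpec are assumed to handle this), split
# chronologically (70/15/15), subsample the training split, train and score both
# model families, then write the CSV tables and the validation figures.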
def main() -> None:
if not CSV_PATH.exists():
print(f"⚠ Fichier introuvable : {CSV_PATH}")
return
df_raw = load_raw_csv(CSV_PATH)
corr_df, lag_df = _load_correlation_and_lag()
selected_from_corr = _select_features_from_corr(corr_df, CONTINUOUS_TARGETS + (RAIN_TARGET,), CORR_THRESHOLD) if USE_CORR_FILTER else set()
numeric_cols = df_raw.select_dtypes(include=["number", "bool"]).columns
if USE_CORR_FILTER and selected_from_corr:
selected_cols = [col for col in numeric_cols if col in selected_from_corr or col in CONTINUOUS_TARGETS or col == RAIN_TARGET]
else:
selected_cols = list(numeric_cols)
lags_mapping = _build_lags_from_matrices(
lag_df,
corr_df,
selected_cols,
default_lags=DEFAULT_LAGS_BY_COL,
threshold=CORR_THRESHOLD,
)
feature_spec = FeatureSpec(lags_minutes=lags_mapping)
df_feat = build_feature_dataframe(df_raw[selected_cols], feature_spec=feature_spec, target_columns=selected_cols)
train_df, val_df, test_df = chronological_split(df_feat, train_frac=0.7, val_frac=0.15)
if TRAIN_SUBSAMPLE_STEP > 1:
train_df = train_df.iloc[::TRAIN_SUBSAMPLE_STEP]
print(f"Dataset chargé : {CSV_PATH}")
print(f" Train : {len(train_df)} lignes")
print(f" Val : {len(val_df)} lignes")
print(f" Test : {len(test_df)} lignes")
print()
reg_results = run_regression_models(train_df, val_df, test_df)
rain_results = run_rain_models(train_df, val_df, test_df)
DATA_DIR.mkdir(parents=True, exist_ok=True)
reg_path = DATA_DIR / "models_tree_regression.csv"
rain_path = DATA_DIR / "models_tree_rain.csv"
reg_results.to_csv(reg_path, index=False)
rain_results.to_csv(rain_path, index=False)
FIG_DIR.mkdir(parents=True, exist_ok=True)
plot_regression_mae(reg_results, FIG_DIR / "models_tree_mae_validation.png")
plot_rain_f1_brier(rain_results, FIG_DIR / "models_tree_rain_validation.png")
print(f"✔ Résultats régression (arbres/GB) : {reg_path}")
print(f"✔ Résultats pluie (arbres/GB) : {rain_path}")
print()
print("=== Scores régression (validation) ===")
print(reg_results[reg_results["split"] == "validation"].to_string(index=False, float_format=lambda x: f"{x:.3f}"))
print()
print("=== Scores pluie (validation) ===")
print(rain_results[rain_results["split"] == "validation"].to_string(index=False, float_format=lambda x: f"{x:.3f}"))
if __name__ == "__main__":
main()
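# Typical invocation, assuming the script lives under docs/<this doc>/scripts/
# and is run from the project root so that data/weather_minutely.csv resolves:
#   python "docs/<this doc>/scripts/run_tree_models.py"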