"""
predict_models_dir.py

Predict with every model in models_dir on a folder of FASTA genomes.
Optionally annotate with ground truth from a TSV and compute the same metrics
as in the original training script (overall + Isolate + MAG + AUC).

Inputs:
  --genomes_dir   Folder with FASTA files (.fna/.fa/.fasta)
  --models_dir    Folder with model_*.joblib + feature_columns_*.json
  --outdir        Output folder
  --truth_tsv     OPTIONAL: genomes-all_metadata_with_genetic_code_id_noNA.tsv
                  (must contain Genome, Genome_type, Genetic_code_ID)

Ground truth (if provided):
  ALT = Genetic_code_ID != 11
  STD = Genetic_code_ID == 11

Outputs:
  - <outdir>/<model>__pred.csv                  (per model, per genome)
  - <outdir>/all_models_predictions_long.csv    (long: model x genome)
  - <outdir>/prediction_summary.csv             (ONLY if truth_tsv is provided)
  - <outdir>/top_models_by_pr_auc.txt           (ONLY if truth_tsv is provided)

Requires:
  - aragorn in PATH (or pass --aragorn)
"""

import os
import re
import json
import time
import argparse
import subprocess
from pathlib import Path
from collections import Counter

import numpy as np
import pandas as pd
from joblib import load as joblib_load

from sklearn.metrics import (
    confusion_matrix,
    accuracy_score,
    precision_score,
    recall_score,
    f1_score,
    roc_auc_score,
    average_precision_score,
)
from sklearn.base import BaseEstimator, ClassifierMixin, clone

class PUBaggingClassifier(BaseEstimator, ClassifierMixin):
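    """Bagging classifier for positive-unlabeled (PU) learning.

    Trains `n_bags` clones of `base_estimator`, each on all positive samples
    (y == 1) plus a random subsample of the unlabeled samples (y == 0)
    treated as negatives; `u_ratio` sets the unlabeled:positive subsample
    size. Predicted probabilities are averaged across bags.
    """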

    def __init__(self, base_estimator, n_bags=15, u_ratio=3.0, random_state=42):
        self.base_estimator = base_estimator
        self.n_bags = int(n_bags)
        self.u_ratio = float(u_ratio)
        self.random_state = int(random_state)
        self.models_ = None
        self.classes_ = np.array([0, 1], dtype=int)

    def fit(self, X, y, sample_weight=None):
        y = np.asarray(y).astype(int)
        pos_idx = np.where(y == 1)[0]
        unl_idx = np.where(y == 0)[0]
        if pos_idx.size == 0:
            raise ValueError("PU training requires at least one positive sample (y==1).")

        rng = np.random.RandomState(self.random_state)
        self.models_ = []

        # No unlabeled samples: fall back to a single model on the full data.
        if unl_idx.size == 0:
            m = clone(self.base_estimator)
            try:
                if sample_weight is not None:
                    m.fit(X, y, sample_weight=np.asarray(sample_weight))
                else:
                    m.fit(X, y)
            except TypeError:  # base estimator does not accept sample_weight
                m.fit(X, y)
            self.models_.append(m)
            return self

        k_u = int(min(unl_idx.size, max(1, round(self.u_ratio * pos_idx.size))))
        for _ in range(self.n_bags):
            u_b = rng.choice(unl_idx, size=k_u, replace=(k_u > unl_idx.size))
            idx_b = np.concatenate([pos_idx, u_b])
            X_b = X.iloc[idx_b] if hasattr(X, "iloc") else X[idx_b]
            y_b = y[idx_b]

            sw_b = None
            if sample_weight is not None:
                sw_b = np.asarray(sample_weight)[idx_b]

            m = clone(self.base_estimator)
            try:
                if sw_b is not None:
                    m.fit(X_b, y_b, sample_weight=sw_b)
                else:
                    m.fit(X_b, y_b)
            except TypeError:  # base estimator does not accept sample_weight
                m.fit(X_b, y_b)

            self.models_.append(m)
        return self

    def predict_proba(self, X):
        if not self.models_:
            raise RuntimeError("PUBaggingClassifier not fitted")
        probs = [m.predict_proba(X) for m in self.models_]
        return np.mean(np.stack(probs, axis=0), axis=0)

    def predict(self, X):
        return (self.predict_proba(X)[:, 1] >= 0.5).astype(int)

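
# Minimal usage sketch for PUBaggingClassifier. The base estimator here is an
# assumption for illustration; any scikit-learn classifier exposing
# predict_proba works:
#
#   from sklearn.ensemble import RandomForestClassifier
#   pu = PUBaggingClassifier(RandomForestClassifier(n_estimators=200), n_bags=15)
#   pu.fit(X_train, y_train)            # y: 1 = known ALT, 0 = unlabeled
#   scores = pu.predict_proba(X)[:, 1]  # averaged ALT probability across bags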

CODON_RE = re.compile(r"\(([ACGTUacgtu]{3})\)")


def set_single_thread_env():
    # Limit BLAS/OpenMP thread pools. Note: some libraries read these only at
    # import time, so this mainly affects subprocesses and later-loaded code.
    os.environ["OMP_NUM_THREADS"] = "1"
    os.environ["OPENBLAS_NUM_THREADS"] = "1"
    os.environ["MKL_NUM_THREADS"] = "1"
    os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
    os.environ["NUMEXPR_NUM_THREADS"] = "1"


def list_fasta_files(genomes_dir: str):
    exts = (".fna", ".fa", ".fasta")
    paths = []
    for fn in os.listdir(genomes_dir):
        p = os.path.join(genomes_dir, fn)
        if not os.path.isfile(p):
            continue
        if fn.endswith(exts):
            paths.append(p)
    return sorted(paths)


def calc_gc_and_tetra(fasta_path):
    bases = ["A", "C", "G", "T"]
    all_kmers = ["".join([a, b, c, d]) for a in bases for b in bases for c in bases for d in bases]
    tetra_counts = {k: 0 for k in all_kmers}

    A = C = G = T = 0
    tail = ""

    with open(fasta_path, "r") as fh:
        for line in fh:
            if line.startswith(">"):
                # Reset the carry-over so tetramers never span contig boundaries.
                tail = ""
                continue
            s = line.strip().upper().replace("U", "T")
            s = re.sub(r"[^ACGT]", "N", s)
            if not s:
                continue

            # Prepend the last 3 bases of the previous line so tetramers that
            # straddle line breaks are still counted (each exactly once).
            seq = tail + s

            for ch in s:
                if ch == "A": A += 1
                elif ch == "C": C += 1
                elif ch == "G": G += 1
                elif ch == "T": T += 1

            for i in range(len(seq) - 3):
                k = seq[i:i+4]
                if "N" in k:
                    continue
                tetra_counts[k] += 1

            tail = seq[-3:] if len(seq) >= 3 else seq

    total_acgt = A + C + G + T
    gc_percent = (float(G + C) / float(total_acgt) * 100.0) if total_acgt > 0 else 0.0

    windows_total = sum(tetra_counts.values())
    denom = float(windows_total) if windows_total > 0 else 1.0
    tetra_freq = {f"tetra_{k}": float(v) / denom for k, v in tetra_counts.items()}

    features = {
        "gc_percent": float(gc_percent),
        "genome_length": float(total_acgt),
    }
    features.update(tetra_freq)
    return features
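
# Sketch of the dict returned by calc_gc_and_tetra (values illustrative):
# 258 keys total -- "gc_percent" and "genome_length" plus one "tetra_XXXX"
# frequency per 4-mer over {A,C,G,T}:
#
#   {"gc_percent": 41.2, "genome_length": 3812449.0,
#    "tetra_AAAA": 0.0043, ..., "tetra_TTTT": 0.0051}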


def run_aragorn(aragorn_bin, fasta_path, out_txt):
    cmd = [aragorn_bin, "-t", "-l", "-gc1", "-w", "-o", out_txt, fasta_path]
    with open(os.devnull, "w") as devnull:
        subprocess.run(cmd, stdout=devnull, stderr=devnull, check=False)


def parse_anticodons_from_aragorn(aragorn_txt):
    counts = Counter()
    if not os.path.isfile(aragorn_txt):
        return counts
    with open(aragorn_txt, "r") as fh:
        for line in fh:
            for m in CODON_RE.finditer(line):
                cod = m.group(1).upper().replace("U", "T")
                if re.fullmatch(r"[ACGT]{3}", cod):
                    counts[cod] += 1
    return counts


def build_ac_features(anticodon_counts):
    bases = ["A", "C", "G", "T"]
    feats = {}
    for a in bases:
        for b in bases:
            for c in bases:
                cod = f"{a}{b}{c}"
                feats[f"ac_{cod}"] = float(anticodon_counts.get(cod, 0))
    return feats


def build_plr_features(ac_features, needed_plr_cols, eps=0.5):
    plr_feats = {}
    for col in needed_plr_cols:
        core = col[len("plr_"):]
        left, right = core.split("__")
        a = ac_features.get(f"ac_{left}", 0.0)
        b = ac_features.get(f"ac_{right}", 0.0)
        plr_feats[col] = float(np.log((a + eps) / (b + eps)))
    return plr_feats
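
# Worked example of one "plr_" feature (a smoothed log-ratio of two anticodon
# counts), using the default eps=0.5 and illustrative counts ac_TCA=4,
# ac_TTA=0:
#
#   plr_TCA__TTA = ln((4 + 0.5) / (0 + 0.5)) = ln(9.0) ~= 2.197
#
# eps keeps the ratio finite when either anticodon was not observed.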


def build_features_for_genome(fasta_path, aragorn_bin, feature_columns, reuse_aragorn=True):
    acc = os.path.splitext(os.path.basename(fasta_path))[0]

    feat_gc_tetra = calc_gc_and_tetra(fasta_path)

    tmp_aragorn = fasta_path + ".aragorn.txt"
    if reuse_aragorn and os.path.isfile(tmp_aragorn):
        try:
            # Re-run only if the cached output is older than the FASTA.
            if os.path.getmtime(tmp_aragorn) < os.path.getmtime(fasta_path):
                run_aragorn(aragorn_bin, fasta_path, tmp_aragorn)
        except Exception:
            run_aragorn(aragorn_bin, fasta_path, tmp_aragorn)
    else:
        run_aragorn(aragorn_bin, fasta_path, tmp_aragorn)

    anticodon_counts = parse_anticodons_from_aragorn(tmp_aragorn)
    ac_feats = build_ac_features(anticodon_counts)

    plr_cols = [c for c in feature_columns if c.startswith("plr_")]
    plr_feats = build_plr_features(ac_feats, plr_cols) if plr_cols else {}

    all_feats = {}
    all_feats.update(ac_feats)
    all_feats.update(plr_feats)
    all_feats.update(feat_gc_tetra)

    # Features not produced above default to 0.0 so the row always matches
    # the training-time column set.
    row = {col: float(all_feats.get(col, 0.0)) for col in feature_columns}
    return acc, row
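
# Minimal call sketch (paths and accession illustrative):
#
#   acc, row = build_features_for_genome(
#       "data/genomes/GUT_GENOME000001.fna", "aragorn", feature_columns
#   )
#   # acc == "GUT_GENOME000001"; row maps every training column to a float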


def load_truth_tsv(tsv_path: str) -> pd.DataFrame:
    df = pd.read_csv(tsv_path, sep="\t", dtype=str)

    for col in ["Genome", "Genome_type", "Genetic_code_ID"]:
        if col not in df.columns:
            raise ValueError(f"TSV missing column '{col}'. Columns: {list(df.columns)}")

    df["Genome"] = df["Genome"].astype(str)
    df["Genome_type"] = df["Genome_type"].astype(str)
    df["Genetic_code_ID"] = pd.to_numeric(df["Genetic_code_ID"], errors="coerce").astype("Int64")

    # ALT = any genetic code other than table 11 (the standard bacterial/archaeal code).
    df["y_true_alt"] = df["Genetic_code_ID"].apply(lambda x: (pd.notna(x) and int(x) != 11)).astype(int)
    df["true_label"] = df["y_true_alt"].map({0: "STD", 1: "ALT"})

    return df[["Genome", "Genome_type", "Genetic_code_ID", "y_true_alt", "true_label"]]
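
# Illustrative truth rows and the labels they produce (Genetic_code_ID 11 is
# the standard code; anything else, e.g. 4 or 25, is ALT):
#
#   Genome            Genome_type  Genetic_code_ID  ->  y_true_alt  true_label
#   GUT_GENOME000001  Isolate      11                   0           STD
#   GUT_GENOME000002  MAG          4                    1           ALT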


def safe_confusion(y_true, y_pred):
    cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
    tn, fp, fn, tp = int(cm[0, 0]), int(cm[0, 1]), int(cm[1, 0]), int(cm[1, 1])
    return tn, fp, fn, tp


def compute_metrics_block(y_true, y_pred, y_score=None):
    y_true = np.asarray(y_true, dtype=int)
    y_pred = np.asarray(y_pred, dtype=int)

    tn, fp, fn, tp = safe_confusion(y_true, y_pred)
    n = int(len(y_true))
    pos = int(np.sum(y_true == 1))

    out = {
        "n": n,
        "positives": pos,
        "tn": tn, "fp": fp, "fn": fn, "tp": tp,
        "accuracy": float(accuracy_score(y_true, y_pred)) if n else np.nan,
        "precision": float(precision_score(y_true, y_pred, zero_division=0)) if n else np.nan,
        "recall": float(recall_score(y_true, y_pred, zero_division=0)) if n else np.nan,
        "f1": float(f1_score(y_true, y_pred, zero_division=0)) if n else np.nan,
        "specificity": float(tn / (tn + fp)) if (tn + fp) > 0 else np.nan,
        "fn_rate": float(fn / (fn + tp)) if (fn + tp) > 0 else np.nan,
        "fp_rate": float(fp / (fp + tn)) if (fp + tn) > 0 else np.nan,
    }

    # AUCs are only defined when both classes are present in y_true.
    if y_score is not None:
        y_score = np.asarray(y_score, dtype=float)
        if n > 0 and len(np.unique(y_true)) == 2:
            try:
                out["roc_auc"] = float(roc_auc_score(y_true, y_score))
            except Exception:
                out["roc_auc"] = np.nan
            try:
                out["pr_auc"] = float(average_precision_score(y_true, y_score))
            except Exception:
                out["pr_auc"] = np.nan
        else:
            out["roc_auc"] = np.nan
            out["pr_auc"] = np.nan
    else:
        out["roc_auc"] = np.nan
        out["pr_auc"] = np.nan

    return out
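
# Worked example for compute_metrics_block:
#
#   y_true = [0, 0, 1, 1], y_pred = [0, 1, 1, 1]
#   -> tn=1, fp=1, fn=0, tp=2
#   -> accuracy=0.75, precision=2/3, recall=1.0, f1=0.8,
#      specificity=0.5, fn_rate=0.0, fp_rate=0.5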


def find_models(models_dir: Path):
    return sorted(models_dir.glob("model_*.joblib"))


def pick_feature_cols(models_dir: Path, feature_cols_arg: str | None):
    if feature_cols_arg:
        return Path(feature_cols_arg)
    # Prefer the default column file; otherwise fall back to the first match.
    p = models_dir / "feature_columns_64log_gc_tetra.json"
    if p.exists():
        return p
    candidates = sorted(models_dir.glob("feature_columns_*.json"))
    if not candidates:
        raise FileNotFoundError(f"No feature_columns_*.json found in {models_dir}")
    return candidates[0]
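
# Expected models_dir layout (file names illustrative; only the
# model_*.joblib and feature_columns_*.json patterns matter):
#
#   models/
#     model_pu_rf.joblib
#     model_pu_hgb.joblib
#     feature_columns_64log_gc_tetra.json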


def main():
    ap = argparse.ArgumentParser(
        description="Predict for all models in a directory; optionally annotate truth from TSV and compute metrics."
    )
    ap.add_argument("--genomes_dir", required=True, help="Folder with FASTA genomes (.fna/.fa/.fasta).")
    ap.add_argument("--models_dir", required=True, help="Folder with model_*.joblib + feature_columns_*.json.")
    ap.add_argument("--outdir", required=True, help="Output folder for CSV predictions.")
    ap.add_argument("--aragorn", default="aragorn", help="Path to ARAGORN binary.")
    ap.add_argument("--feature_cols", default=None, help="Optional: force a specific feature_columns_*.json.")
    ap.add_argument("--reuse_aragorn", action="store_true", help="Reuse *.aragorn.txt if it exists and is fresh.")
    ap.add_argument(
        "--truth_tsv",
        default=None,
        help="OPTIONAL: genomes-all_metadata_with_genetic_code_id_noNA.tsv (Genome, Genome_type, Genetic_code_ID).",
    )
    args = ap.parse_args()

    set_single_thread_env()

    genomes_dir = Path(args.genomes_dir)
    models_dir = Path(args.models_dir)
    outdir = Path(args.outdir)
    outdir.mkdir(parents=True, exist_ok=True)

    fasta_files = list_fasta_files(str(genomes_dir))
    if not fasta_files:
        raise SystemExit(f"No FASTA files found in {genomes_dir}")

    models = find_models(models_dir)
    if not models:
        raise SystemExit(f"No model_*.joblib found in {models_dir}")

    feat_cols_path = pick_feature_cols(models_dir, args.feature_cols)

    print(f"[INFO] Genomes : {len(fasta_files)} in {genomes_dir}")
    print(f"[INFO] Models  : {len(models)} in {models_dir}")
    print(f"[INFO] FeatCols: {feat_cols_path}")
    print(f"[INFO] Truth   : {args.truth_tsv if args.truth_tsv else '(none)'}")

    with open(feat_cols_path, "r") as fh:
        feature_columns = json.load(fh)

    truth = None
    if args.truth_tsv:
        truth = load_truth_tsv(args.truth_tsv)

    # Build the feature matrix once; it is shared by all models.
    t_feat0 = time.time()
    rows, accs = [], []
    for i, fasta in enumerate(fasta_files, 1):
        if i % 50 == 0 or i == 1 or i == len(fasta_files):
            print(f"[FEAT] {i}/{len(fasta_files)} {os.path.basename(fasta)}")
        acc, feats = build_features_for_genome(
            fasta_path=fasta,
            aragorn_bin=args.aragorn,
            feature_columns=feature_columns,
            reuse_aragorn=args.reuse_aragorn,
        )
        accs.append(acc)
        rows.append(feats)

    X = pd.DataFrame(rows, index=accs)[feature_columns]
    print(f"[FEAT] Built X={X.shape} in {(time.time()-t_feat0):.1f}s")

    # Per-genome annotation frame; truth columns stay NA when no TSV is given.
    ann = pd.DataFrame({"Genome": accs})
    if truth is not None:
        ann = ann.merge(truth, how="left", on="Genome")
        n_annot = int(ann["y_true_alt"].notna().sum())
        n_missing = int(len(ann) - n_annot)
        print(f"[TRUTH] Annotated: {n_annot}/{len(ann)}  Missing_in_TSV: {n_missing}")
    else:
        ann["Genome_type"] = pd.NA
        ann["Genetic_code_ID"] = pd.NA
        ann["y_true_alt"] = pd.NA
        ann["true_label"] = pd.NA

    long_rows = []
    summary_rows = []

    for mi, model_path in enumerate(models, 1):
        model_name = model_path.stem
        print("\n" + "=" * 80)
        print(f"[{mi}/{len(models)}] MODEL: {model_path.name}")
        print("=" * 80)

        t0 = time.time()
        model = joblib_load(model_path)

        # Prefer probabilities; fall back to hard labels (or all-STD) below.
        proba = None
        if hasattr(model, "predict_proba"):
            try:
                proba = model.predict_proba(X)[:, 1]
            except Exception:
                proba = None

        if hasattr(model, "predict"):
            try:
                yhat = model.predict(X)
            except Exception:
                yhat = (proba >= 0.5).astype(int) if proba is not None else np.zeros(len(X), dtype=int)
        else:
            yhat = (proba >= 0.5).astype(int) if proba is not None else np.zeros(len(X), dtype=int)

        elapsed = time.time() - t0

        df_pred = ann.copy()
        df_pred["model"] = model_name
        df_pred["y_pred_alt"] = np.asarray(yhat).astype(int)
        df_pred["pred_label"] = df_pred["y_pred_alt"].map({0: "STD", 1: "ALT"})
        df_pred["proba_alt"] = np.asarray(proba, dtype=float) if proba is not None else np.nan

        out_csv = outdir / f"{model_name}__pred.csv"
        df_pred.to_csv(out_csv, index=False)
        print(f"[WRITE] {out_csv} rows={len(df_pred)} time={(elapsed/60):.2f} min")

        keep_cols = ["model", "Genome", "Genome_type", "Genetic_code_ID", "y_true_alt", "y_pred_alt", "proba_alt"]
        keep_cols = [c for c in keep_cols if c in df_pred.columns]
        long_rows.extend(df_pred[keep_cols].to_dict(orient="records"))

        if truth is not None:
            df_eval = df_pred[df_pred["y_true_alt"].notna()].copy()
            if df_eval.shape[0] == 0:
                print("[METRICS] No annotated genomes for this run (truth TSV did not match Genome names).")
                continue

            y_true = df_eval["y_true_alt"].astype(int).values
            y_pred = df_eval["y_pred_alt"].astype(int).values
            y_score = df_eval["proba_alt"].astype(float).values if proba is not None else None

            overall = compute_metrics_block(y_true, y_pred, y_score=y_score)

            def subset_metrics(gen_type: str):
                sub = df_eval[df_eval["Genome_type"] == gen_type]
                if sub.shape[0] == 0:
                    return None
                yt = sub["y_true_alt"].astype(int).values
                yp = sub["y_pred_alt"].astype(int).values
                ys = sub["proba_alt"].astype(float).values if proba is not None else None
                return compute_metrics_block(yt, yp, y_score=ys)

            iso = subset_metrics("Isolate")
            mag = subset_metrics("MAG")

            srow = {
                "model": model_name,
                "model_file": str(model_path),
                "feature_cols": str(feat_cols_path),
                "n_genomes_total": int(len(df_pred)),
                "n_annotated": int(df_eval.shape[0]),
                "n_missing_truth": int(len(df_pred) - df_eval.shape[0]),
                "elapsed_sec": float(elapsed),
                "elapsed_min": float(elapsed / 60.0),
            }

            for k, v in overall.items():
                srow[f"overall_{k}"] = v

            if iso is not None:
                for k, v in iso.items():
                    srow[f"isolate_{k}"] = v

            if mag is not None:
                for k, v in mag.items():
                    srow[f"mag_{k}"] = v

            summary_rows.append(srow)
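
    # Illustrative rows of the long CSV written below (one row per
    # model x genome; truth columns are empty when --truth_tsv is omitted):
    #
    #   model,Genome,Genome_type,Genetic_code_ID,y_true_alt,y_pred_alt,proba_alt
    #   model_pu_rf,GUT_GENOME000001,Isolate,11,0,0,0.04
    #   model_pu_rf,GUT_GENOME000002,MAG,4,1,1,0.93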
    long_csv = outdir / "all_models_predictions_long.csv"
    pd.DataFrame(long_rows).to_csv(long_csv, index=False)
    print(f"\n[WRITE] {long_csv} rows={len(long_rows)}")

    if truth is not None:
        summary_csv = outdir / "prediction_summary.csv"
        df_sum = pd.DataFrame(summary_rows)
        df_sum.to_csv(summary_csv, index=False)
        print(f"[WRITE] {summary_csv} rows={len(df_sum)}")

        if not df_sum.empty and "overall_pr_auc" in df_sum.columns:
            df_rank = df_sum.sort_values(["overall_pr_auc", "overall_roc_auc"], ascending=False, na_position="last")
            report_path = outdir / "top_models_by_pr_auc.txt"
            cols = [
                "model",
                "n_annotated",
                "overall_positives",
                "overall_precision",
                "overall_recall",
                "overall_f1",
                "overall_pr_auc",
                "overall_roc_auc",
                "isolate_fn", "isolate_fp", "mag_fn", "mag_fp",
                "elapsed_min",
            ]
            cols = [c for c in cols if c in df_rank.columns]
            with open(report_path, "w", encoding="utf-8") as f:
                f.write("Top models by overall PR-AUC (ALT = Genetic_code_ID != 11)\n")
                f.write(df_rank[cols].head(25).to_string(index=False))
                f.write("\n")
            print(f"[WRITE] {report_path}")

    print("[DONE]")


if __name__ == "__main__":
    main()