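"""
Create grouped, stratified 80/20 train/test splits compatible with Mass_models.py.

Example invocation (the script name and file paths are illustrative; the flags
match the argparse definition below):

    python make_splits.py \
        --ndjson records.ndjson \
        --supp1 Supp1.csv \
        --supp2 Supp2.xlsx \
        --outdir splits \
        --n_splits 5 \
        --seed 42

Each subset directory receives train.jsonl, test.jsonl, and manifest.json.
"""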
import argparse, json, re, sys
from pathlib import Path

import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedGroupKFold


def extract_acc_base(acc: str) -> str:
    """Return the versionless assembly accession base, e.g. 'GCA_000005845'."""
    # GenBank/RefSeq assembly accessions start with GCA_/GCF_; for anything
    # else, fall back to stripping everything after the first '.'.
    m = re.match(r'^(GC[AF]_\d+)', str(acc))
    return m.group(1) if m else str(acc).split('.')[0]
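# For example (accessions are illustrative):
#   extract_acc_base("GCA_000005845.2")  -> "GCA_000005845"
#   extract_acc_base("GCF_000001405.40") -> "GCF_000001405"
#   extract_acc_base("custom.1")         -> "custom"      (fallback path)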
def load_bacterial_bases_from_supp1(supp1_csv: str) -> set:
    """Return assembly bases for Supp1 rows whose domain of life is Bacteria ('B')."""
    df = pd.read_csv(supp1_csv)
    # Resolve column names case-insensitively, tolerating a few common variants.
    cols = {c.lower().strip(): c for c in df.columns}
    dom_col = cols.get('domain of life') or cols.get('domain_of_life') or cols.get('domain') or 'domain of life'
    asm_col = cols.get('assembly') or cols.get('assembly accession') or 'assembly'
    if dom_col not in df.columns or asm_col not in df.columns:
        raise ValueError(
            "Supp1.csv must contain columns similar to 'domain of life' and 'assembly'. "
            f"Found: {list(df.columns)}"
        )
    mask = df[dom_col].astype(str).str.strip().str.upper().eq('B')
    df_b = df.loc[mask, [asm_col]].dropna()
    return set(extract_acc_base(a) for a in df_b[asm_col].astype(str))
def load_alt_bases_from_supp2_legacy(supp2_xlsx: str) -> set:
    """
    ORIGINAL behavior (kept): column index-based extraction.
    WARNING: this assumes the 4th column (usecols=[3]) contains assembly IDs
    for the ALT label.
    """
    df = pd.read_excel(supp2_xlsx, header=None, usecols=[3])
    alt = (
        df.iloc[:, 0]
        .dropna()
        .astype(str)
        .unique()
        .tolist()
    )
    return set(extract_acc_base(x) for x in alt)
def load_contam_bases_from_supp2(supp2_xlsx: str) -> set:
    """
    NEW: assemblies to REMOVE (contaminated), i.e. rows where
    'Evidence of assembly contamination with alt gen code' == 'yes'.
    Uses the named columns 'assembly' and
    'Evidence of assembly contamination with alt gen code'.
    """
    df = pd.read_excel(supp2_xlsx)

    cols = {c.lower().strip(): c for c in df.columns}
    asm_col = cols.get('assembly')
    ev_col = cols.get('evidence of assembly contamination with alt gen code')

    if asm_col is None or ev_col is None:
        raise ValueError(
            "Supp2.xlsx must contain columns 'assembly' and "
            "'Evidence of assembly contamination with alt gen code' to filter contaminated rows. "
            f"Found columns: {list(df.columns)}"
        )

    mask = (
        df[ev_col]
        .astype(str)
        .str.strip()
        .str.lower()
        .eq('yes')
    )

    contam_bases = (
        df.loc[mask, asm_col]
        .dropna()
        .astype(str)
        .apply(extract_acc_base)
        .unique()
        .tolist()
    )

    return set(contam_bases)
def read_ndjson_records(path: str):
    """Yield one parsed JSON object per non-empty line, skipping malformed lines."""
    with open(path, 'r') as fh:
        for line in fh:
            line = line.strip()
            if not line:
                continue
            try:
                yield json.loads(line)
            except json.JSONDecodeError:
                continue
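# Each NDJSON line is expected to be a JSON object carrying at least the
# assembly accession under "acc"; all other fields are passed through
# untouched, e.g. (fields beyond "acc" are illustrative):
#   {"acc": "GCA_000005845.2", "features": [0.1, 0.2], "label": null}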
def main():
    ap = argparse.ArgumentParser(
        description="Create grouped stratified 80/20 splits compatible with Mass_models.py"
    )
    ap.add_argument('--ndjson', required=True, help='Input NDJSON file (one JSON object per line)')
    ap.add_argument('--supp1', required=False, help="Optional Supp1.csv filter: keep only Bacteria (domain == 'B') by assembly")
    ap.add_argument('--supp2', required=True, help='Supp2.xlsx (used for legacy alt labels + contamination filter)')
    ap.add_argument('--outdir', required=True, help='Output directory root for subsets')
    ap.add_argument('--n_splits', type=int, default=1, help='Number of replicate 80/20 splits')
    ap.add_argument('--seed', type=int, default=42, help='Base random seed (replicate k uses seed + k)')
    args = ap.parse_args()
    ndjson_path = Path(args.ndjson)
    if not ndjson_path.exists():
        sys.exit(f"[ERR ] NDJSON not found: {ndjson_path}")

    bacteria_bases = None
    if args.supp1:
        print("[FILTER] Loading Supp1 (Bacteria-only by 'domain of life' & 'assembly')…")
        bacteria_bases = load_bacterial_bases_from_supp1(args.supp1)
        print(f"[FILTER] Allowed assembly bases: {len(bacteria_bases)}")
    alt_bases = load_alt_bases_from_supp2_legacy(args.supp2)
    print(f"[LABEL] Alt bases from Supp2 (legacy col[3]): {len(alt_bases)}")

    contam_bases = load_contam_bases_from_supp2(args.supp2)
    print(f"[FILTER] Contaminated bases from Supp2 where evidence == 'yes': {len(contam_bases)}")

    overlap = len(alt_bases & contam_bases)
    if overlap:
        print(f"[WARN ] Overlap alt vs contaminated: {overlap} bases (will be REMOVED from dataset)")

    print(f"[LOAD ] Reading NDJSON: {ndjson_path}")
    records, groups, y = [], [], []
    dropped_contam = 0
    dropped_supp1 = 0

    for obj in read_ndjson_records(str(ndjson_path)):
        acc = obj.get("acc")
        if not acc:
            continue
        base = extract_acc_base(acc)

        # Optional Supp1 filter: keep Bacteria-only assemblies.
        if bacteria_bases is not None and base not in bacteria_bases:
            dropped_supp1 += 1
            continue

        # Drop assemblies flagged as contaminated in Supp2.
        if base in contam_bases:
            dropped_contam += 1
            continue

        records.append(obj)
        groups.append(base)
        y.append(1 if base in alt_bases else 0)
    if not records:
        sys.exit("[ERR ] No records after filtering. Check Supp1 filter / Supp2 contamination filter / NDJSON.")

    y = np.array(y, dtype=int)
    pos = int(y.sum())
    print(
        f"[DATA ] kept={len(records)} | positives={pos} ({100.0*pos/len(records):.2f}%) | "
        f"groups={len(set(groups))} | dropped_contam={dropped_contam} | dropped_supp1={dropped_supp1}"
    )

    outroot = Path(args.outdir)
    outroot.mkdir(parents=True, exist_ok=True)
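    # Split construction: a 5-fold StratifiedGroupKFold yields an ~80/20
    # train/test partition when only the first fold is taken. Grouping by
    # assembly base guarantees that all records from one assembly land on the
    # same side of the split, while stratification on y keeps the positive
    # rate roughly equal in train and test. Replicate k is seeded with
    # seed + k so the replicates differ.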
    for k in range(args.n_splits):
        subset_dir = outroot / f"subset{k+1:02d}"
        subset_dir.mkdir(parents=True, exist_ok=True)

        # Build a fresh splitter per replicate, seeded with seed + k: calling
        # next(sgkf.split(...)) repeatedly on one splitter starts a new
        # generator each time and returns an identical first fold.
        sgkf = StratifiedGroupKFold(n_splits=5, shuffle=True, random_state=args.seed + k)
        idx = np.arange(len(records))
        tr_idx, te_idx = next(sgkf.split(idx, y, groups))
        train_records = [records[i] for i in tr_idx]
        test_records = [records[i] for i in te_idx]
        with open(subset_dir / "train.jsonl", "w") as ftr:
            for r in train_records:
                ftr.write(json.dumps(r, separators=(',', ':')) + "\n")
        with open(subset_dir / "test.jsonl", "w") as fte:
            for r in test_records:
                fte.write(json.dumps(r, separators=(',', ':')) + "\n")
        y_tr = y[tr_idx]
        y_te = y[te_idx]
        manifest = {
            "n_total": int(len(records)),
            "n_train": int(len(train_records)),
            "n_test": int(len(test_records)),
            "positives_total": int(y.sum()),
            "positives_train": int(y_tr.sum()),
            "positives_test": int(y_te.sum()),
            "pct_pos_total": float(100.0 * y.sum() / len(records)),
            "pct_pos_train": float(100.0 * y_tr.sum() / len(train_records)),
            "pct_pos_test": float(100.0 * y_te.sum() / len(test_records)),
            "groups_total": int(len(set(groups))),
            "seed": int(args.seed + k),
            "source_ndjson": str(ndjson_path.resolve()),
            "supp2_contam_removed": int(len(contam_bases)),
        }
        (subset_dir / "manifest.json").write_text(json.dumps(manifest, indent=2))

        print(f"[WRITE] {subset_dir} | train={len(train_records)} test={len(test_records)} | pos_tr={int(y_tr.sum())} pos_te={int(y_te.sum())}")
    print("[DONE ] All subsets written.")


if __name__ == "__main__":
    main()