Update app.py
app.py
CHANGED
@@ -150,11 +150,13 @@ def eval_latest(run_dir_text):
         f"--output_dir='{eval_out_dir}'"
     )
     rc, tail = _run(cmd, elog)
-
-
+
+    # --- optional patch: parse the printed dict and write metrics.json ---
+    import re, ast, json, pathlib
     metrics_txt = "(metrics.json not found)"
     p = pathlib.Path(eval_out_dir) / "metrics.json"
-
+
+    # Try to parse the last dict-like summary from the log tail
     m = re.findall(r"\{[^}]+pc_success[^}]+\}", tail, flags=re.S)
     if m:
         try:
@@ -169,19 +171,18 @@ def eval_latest(run_dir_text):
             metrics_txt = f"Success rate: {out['success_rate']}\nAvg max overlap: {out['avg_max_overlap']}"
         except Exception:
             pass
-
-
-    metrics_txt = "(metrics.json not found)"
-    p = pathlib.Path(eval_out_dir) / "metrics.json"
-    if p.exists():
+    elif p.exists():
+        # Fallback: if a previous metrics.json exists, show it
         try:
-
-            metrics_txt = f"Success rate: {
+            d = json.loads(p.read_text())
+            metrics_txt = f"Success rate: {d.get('success_rate')}\nAvg max overlap: {d.get('avg_max_overlap')}"
         except Exception:
             metrics_txt = "(could not parse metrics.json)"
+    # --- end patch ---
+
     msg = f"Evaluated run at: {run_dir}\nEval exited rc={rc}\n\n=== eval.log tail ===\n{tail}"
     return msg, run_dir, tail_file(elog), metrics_txt
-
+
 # ---------- Maintenance (list / delete runs) ----------
 def list_runs():
     root = pathlib.Path(RUN_ROOT)