#!/usr/bin/env python3
"""
parallel_generate.py -- Parallel content generation for Bootstrap Basil.

Spawns N worker processes that run sessions concurrently until a target
number of usable graded turns is reached (the target is refreshed
periodically from the current age band), then optionally trains and
evaluates.

Usage:
    python parallel_generate.py --target-turns 10000 --workers 20
    python parallel_generate.py --target-turns 10000 --workers 20 --no-train
    python parallel_generate.py --target-turns 5000 --workers 10 --device cpu

Worker #1 prints verbose output to stdout (so you can watch a session).
Workers 2-N redirect output to logs/worker_{N:02d}.log.
The main process prints a periodic status line.
"""
import os
import sys
import json
import time
import signal
import argparse
import multiprocessing as mp
from datetime import datetime

from config import (
    LOG_DIR,
    get_basil_model_name,
    get_train_every_usable_turns,
    DEFAULT_WORKERS,
)
# ---------------------------------------------------------------------------
# Shared state
# ---------------------------------------------------------------------------
# Created in run_parallel_generation() and installed as globals in each
# worker process by _init_worker(). mp.Value/mp.Event are factory functions,
# not types, so plain assignments are used rather than annotations.
_shared_turn_count = None    # mp.Value("i"): total graded turns so far
_shared_usable_count = None  # mp.Value("i"): usable turns (nonzero LoRA weight)
_stop_event = None           # mp.Event: signals workers to stop
_shared_target = None        # mp.Value("i"): dynamic target (updated by monitor)
def _debug_log(run_id: str, hypothesis_id: str, location: str, message: str, data: dict):
# region agent log
try:
payload = {
"id": f"log_{int(time.time() * 1000)}_{os.getpid()}",
"timestamp": int(time.time() * 1000),
"runId": run_id,
"hypothesisId": hypothesis_id,
"location": location,
"message": message,
"data": data,
}
with open("/home/ubuntu/bootstrap-basil/bootstrap-basil/.cursor/debug.log", "a", encoding="utf-8") as f:
f.write(json.dumps(payload, ensure_ascii=True) + "\n")
except Exception:
pass
# endregion
def _init_worker(turn_count, usable_count, stop_event, target_value):
"""Initializer for each worker process -- stores shared objects as globals."""
global _shared_turn_count, _shared_usable_count, _stop_event, _shared_target
_shared_turn_count = turn_count
_shared_usable_count = usable_count
_stop_event = stop_event
_shared_target = target_value
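    # Ignore SIGINT in workers: Ctrl+C is handled by the main process,
    # which then asks workers to stop via the shared stop event.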
signal.signal(signal.SIGINT, signal.SIG_IGN)
# ---------------------------------------------------------------------------
# Worker function
# ---------------------------------------------------------------------------
def _worker_loop(worker_id: int, batch_id: str, verbose_to_stdout: bool, device: str = None):
"""
Run sessions in a loop until the stop event is set or the target is reached.
Args:
worker_id: 1-based worker identifier
batch_id: Shared batch ID for file naming
verbose_to_stdout: If True, print verbose output to real stdout
device: Force device ('cpu' or 'cuda') for Basil model
"""
# Force device if requested (before any torch imports in session code)
if device == "cpu":
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Suppress safetensors/transformers loading progress bars in all workers
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "1"
os.environ["HF_HUB_DISABLE_PROGRESS_BARS"] = "1"
os.environ["TQDM_DISABLE"] = "1"
import logging
logging.getLogger("transformers").setLevel(logging.ERROR)
    # Import session runners inside the worker process (after start-up),
    # so heavy/CUDA imports never touch the parent process.
from auto_session import run_session
from prompts.storytime.storytime_session import run_storytime_session
from prompts.howitworks.howitworks_session import run_howitworks_session
from whychain_session import run_whychain_session
# Per-worker batch files
os.makedirs(LOG_DIR, exist_ok=True)
w_tag = f"w{worker_id:02d}"
batch_graded_path = os.path.join(LOG_DIR, f"batch_{batch_id}_{w_tag}_graded.jsonl")
batch_sessions_path = os.path.join(LOG_DIR, f"batch_{batch_id}_{w_tag}_sessions.jsonl")
batch_meta_path = os.path.join(LOG_DIR, f"batch_{batch_id}_{w_tag}_meta.jsonl")
# Set up output redirection for non-primary workers
log_file = None
if not verbose_to_stdout:
log_path = os.path.join(LOG_DIR, f"worker_{worker_id:02d}.log")
log_file = open(log_path, "w")
sys.stdout = log_file
sys.stderr = log_file
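        # Reassigning sys.stdout/sys.stderr is process-local: each worker is
        # its own process, so the redirection cannot leak between workers.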
session_count = 0
try:
while not _stop_event.is_set():
# Check if we've reached the usable turn target (dynamic)
with _shared_usable_count.get_lock():
if _shared_usable_count.value >= _shared_target.value:
break
# Rotate session types: 0=classroom, 1=howitworks, 2=whychain, 3=storytime
session_type = session_count % 4
try:
if session_type == 0:
result = run_session(
include_sophie=True,
verbose=True,
training_phase="normal",
batch_graded_path=batch_graded_path,
batch_sessions_path=batch_sessions_path,
batch_meta_path=batch_meta_path,
)
elif session_type == 1:
result = run_howitworks_session(
verbose=True,
training_phase="howitworks",
batch_graded_path=batch_graded_path,
batch_sessions_path=batch_sessions_path,
batch_meta_path=batch_meta_path,
)
elif session_type == 2:
result = run_whychain_session(
verbose=True,
training_phase="whychain",
batch_graded_path=batch_graded_path,
batch_sessions_path=batch_sessions_path,
batch_meta_path=batch_meta_path,
)
else:
result = run_storytime_session(
verbose=True,
training_phase="storytime",
batch_graded_path=batch_graded_path,
batch_sessions_path=batch_sessions_path,
batch_meta_path=batch_meta_path,
)
# Update shared counters
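                # "graded" counts every scored turn; "usable" counts turns with
                # nonzero LoRA weight, which is what the stop target tracks.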
sm = result.get("session_metrics", {})
graded = sm.get("graded_turns_count", 0)
usable = sm.get("usable_turns_count", 0)
with _shared_turn_count.get_lock():
_shared_turn_count.value += graded
with _shared_usable_count.get_lock():
_shared_usable_count.value += usable
session_count += 1
except Exception as e:
print(f"[Worker {worker_id}] Session failed: {e}")
session_count += 1
time.sleep(2) # Brief pause before retry
finally:
if log_file:
log_file.flush()
log_file.close()
return session_count
# ---------------------------------------------------------------------------
# Main process: monitor + coordinate
# ---------------------------------------------------------------------------
def _format_elapsed(seconds: float) -> str:
    """Format elapsed time as 'Xh Ym', 'Xm Ys', or 'Xs'."""
    if seconds < 60:
        return f"{seconds:.0f}s"
    minutes = int(seconds // 60)  # floor so 90s renders as '1m 30s', not '2m 30s'
    if minutes < 60:
        return f"{minutes}m {int(seconds % 60)}s"
    hours = minutes // 60
    return f"{hours}h {minutes % 60}m"
def run_parallel_generation(
target_turns: int = 10000,
num_workers: int = DEFAULT_WORKERS,
do_train: bool = True,
device: str = None,
):
"""
Main entry point for parallel content generation.
1. Identity probe
2. Spawn workers, generate until target
3. Optionally train + eval
4. Print summary and exit
"""
batch_id = datetime.now().strftime("%Y%m%d_%H%M%S")
start_time = time.time()
run_id = f"pg_{batch_id}_{os.getpid()}"
# region agent log
_debug_log(
run_id=run_id,
hypothesis_id="H3_target_override",
location="parallel_generate.py:run_parallel_generation_entry",
message="run_parallel_generation called",
data={
"target_turns_arg": int(target_turns),
"num_workers": int(num_workers),
"do_train": bool(do_train),
"device": device or "auto",
},
)
# endregion
print(f"\n{'=' * 60}")
print("Bootstrap Basil -- Parallel Content Generation")
print(f"{'=' * 60}")
print(f" Target graded turns : {target_turns:,}")
print(f" Workers : {num_workers}")
print(f" Batch ID : {batch_id}")
print(f" Device override : {device or 'auto'}")
print(f" Auto-train after : {'yes' if do_train else 'no'}")
print(f"{'=' * 60}\n")
# --- Step 1: Identity probe ---
try:
from identity_probe import run_identity_probe
model_version = os.path.basename(get_basil_model_name())
print(f"[Main] Running identity probe on {model_version}...")
run_identity_probe(label=f"pre_generate_{model_version}")
print(f"[Main] Identity probe complete.\n")
except Exception as e:
print(f"[Main] Identity probe failed (non-fatal): {e}\n")
# --- Step 2: Spawn workers ---
# Use 'spawn' context to avoid CUDA fork issues. When called from
# orchestrator.py (rather than __main__), the global start method may
# still be 'fork', which silently corrupts CUDA state in child procs.
# Snapshot pre-existing usable turns so dynamic target refresh can recompute
try:
from metrics_manager import load_rolling_metrics
_usable_before_run = load_rolling_metrics().get("usable_turns_since_last_train", 0)
except Exception:
_usable_before_run = 0
# region agent log
_debug_log(
run_id=run_id,
hypothesis_id="H4_refresh_baseline",
location="parallel_generate.py:usable_before_run_snapshot",
message="Captured usable baseline before run",
data={"usable_before_run": int(_usable_before_run), "initial_target_turns": int(target_turns)},
)
# endregion
ctx = mp.get_context("spawn")
turn_count = ctx.Value("i", 0)
usable_count = ctx.Value("i", 0)
stop_event = ctx.Event()
target_value = ctx.Value("i", target_turns)
# Build worker args: worker_id, batch_id, verbose_to_stdout, device
worker_args = []
for i in range(1, num_workers + 1):
verbose = (i == 1) # Only worker #1 prints to terminal
worker_args.append((i, batch_id, verbose, device))
processes = []
for args in worker_args:
p = ctx.Process(
target=_worker_entry,
args=(args, turn_count, usable_count, stop_event, target_value),
)
p.start()
processes.append(p)
print(f"[Main] Spawned {num_workers} workers. Worker #1 output below.\n")
print(f"{'─' * 60}")
# --- Monitor loop ---
_refresh_ticker = 0
try:
while True:
time.sleep(5)
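            # Lock-free .value reads are fine here: the monitor only needs
            # an approximate snapshot for progress display.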
current_usable = usable_count.value
current_total = turn_count.value
elapsed = time.time() - start_time
# Refresh dynamic target from assessment every ~30s
_refresh_ticker += 1
if _refresh_ticker % 6 == 0:
try:
from memory_manager import load_basil_assessment
cur_band = load_basil_assessment().get("age_band", 0)
new_target = max(0, get_train_every_usable_turns(cur_band) - _usable_before_run)
old_target = target_value.value
if new_target != old_target:
target_value.value = new_target
# region agent log
_debug_log(
run_id=run_id,
hypothesis_id="H3_target_override",
location="parallel_generate.py:dynamic_target_refresh",
message="Dynamic target updated",
data={
"old_target": int(old_target),
"new_target": int(new_target),
"age_band": int(cur_band),
"usable_before_run": int(_usable_before_run),
"current_usable": int(current_usable),
"current_total": int(current_total),
},
)
# endregion
sys.stdout.write(
f"\n[Main] Target updated: {old_target:,} -> {new_target:,} "
f"(age_band now {cur_band})\n"
)
sys.stdout.flush()
except Exception:
pass
current_target = target_value.value
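            # Workers check the same usable-vs-target condition themselves,
            # so no stop signal is needed; each exits after its current session.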
if current_usable >= current_target:
# region agent log
_debug_log(
run_id=run_id,
hypothesis_id="H5_stop_condition",
location="parallel_generate.py:monitor_stop_reached",
message="Stop condition reached in monitor loop",
data={
"current_usable": int(current_usable),
"current_total": int(current_total),
"current_target": int(current_target),
},
)
# endregion
break
# Estimate remaining time based on usable turn rate
if current_usable > 0:
rate = current_usable / elapsed
remaining = (current_target - current_usable) / rate
eta_str = f"~{_format_elapsed(remaining)} remaining"
else:
eta_str = "estimating..."
# Print status line (overwrites previous)
status = (
f"\r[Progress] Usable: {current_usable:,} / {current_target:,} | "
f"Total: {current_total:,} | "
f"Elapsed: {_format_elapsed(elapsed)} | {eta_str} "
)
sys.stdout.write(status)
sys.stdout.flush()
# Check if all workers have exited
alive = sum(1 for p in processes if p.is_alive())
if alive == 0:
break
except KeyboardInterrupt:
print(f"\n\n[Main] Ctrl+C received. Signalling workers to stop...")
stop_event.set()
# Wait for all workers to finish
print(f"\n[Main] Waiting for workers to finish current sessions...")
for p in processes:
p.join(timeout=120) # Give 2 minutes max
if p.is_alive():
print(f"[Main] Worker {p.pid} still running, terminating...")
p.terminate()
p.join(timeout=5)
final_total = turn_count.value
final_usable = usable_count.value
elapsed = time.time() - start_time
print(f"\n{'=' * 60}")
print(f"Generation Complete")
print(f"{'=' * 60}")
print(f" Usable turns : {final_usable:,}")
print(f" Total graded : {final_total:,}")
if final_total > 0:
print(f" Usable rate : {final_usable / final_total:.1%}")
print(f" Elapsed : {_format_elapsed(elapsed)}")
if elapsed > 0:
print(f" Rate (total) : {final_total / (elapsed / 3600):,.0f} turns/hour")
print(f"{'=' * 60}\n")
# --- Step 3: Train + eval ---
if do_train and final_total > 0:
print("[Main] Starting training workflow...\n")
try:
from orchestrator import run_train_with_checkpoint_and_eval, _training_log
with _training_log("train"):
result = run_train_with_checkpoint_and_eval()
print(f"\n[Main] Training result: {result}")
except Exception as e:
print(f"[Main] Training failed: {e}")
elif not do_train:
print("[Main] Skipping training (--no-train). Run manually with:")
print(" python orchestrator.py train --force\n")
# --- Step 4: Summary ---
print(f"\n{'=' * 60}")
print("All done. Review results and re-launch when ready.")
print(f"{'=' * 60}\n")
def _worker_entry(args, turn_count, usable_count, stop_event, target_value):
"""
Entry point for a worker process.
Initializes shared state and runs the worker loop.
"""
_init_worker(turn_count, usable_count, stop_event, target_value)
worker_id, batch_id, verbose_to_stdout, device = args
_worker_loop(worker_id, batch_id, verbose_to_stdout, device)
# ---------------------------------------------------------------------------
# CLI
# ---------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(
description="Parallel content generation for Bootstrap Basil",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
python parallel_generate.py --target-turns 10000 --workers 20
python parallel_generate.py --target-turns 10000 --workers 20 --no-train
python parallel_generate.py --target-turns 5000 --workers 10 --device cpu
""",
)
    parser.add_argument(
        "--target-turns", type=int, default=10000,
        help="Initial usable-turn target; refreshed from the current age band during the run (default: 10000)",
    )
parser.add_argument(
"--workers", type=int, default=DEFAULT_WORKERS,
help=f"Number of parallel worker processes (default: {DEFAULT_WORKERS})",
)
parser.add_argument(
"--no-train", action="store_true",
help="Skip training after generation (generate only)",
)
parser.add_argument(
"--device", choices=["cpu", "cuda"], default=None,
help="Force device for Basil model inference (default: auto-detect)",
)
args = parser.parse_args()
run_parallel_generation(
target_turns=args.target_turns,
num_workers=args.workers,
do_train=not args.no_train,
device=args.device,
)
if __name__ == "__main__":
# Use 'spawn' start method to avoid CUDA fork issues
mp.set_start_method("spawn", force=True)
main()