# run_evaluation.py (forked from huggingface/nanoVLM)
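"""Run lmms-eval on a nanoVLM checkpoint and save per-task metrics as JSON.

Example invocation (paths and step value are illustrative, not taken from the repo):

    python run_evaluation.py \
        --checkpoint_path checkpoints/my_run/step_5000 \
        --global_step 5000 \
        --run_name my_run \
        --tasks mmstar,ocrbench \
        --batch_size 64
"""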
import argparse
import os
import json

import torch

from models.vision_language_model import VisionLanguageModel
import models.config as config


def main():
    parser = argparse.ArgumentParser(description="Run lmms-eval on a model checkpoint.")
    parser.add_argument('--checkpoint_path', type=str, help="Path to the model checkpoint directory.")
    parser.add_argument('--global_step', type=int, help="Global step at which the checkpoint was saved.")
    parser.add_argument('--run_name', type=str, help="The name of the training run.")
    # These arguments are based on TrainConfig, passed from the eval.slurm script
    parser.add_argument('--tasks', type=str, default='mmstar,mmmu,ocrbench,textvqa', help='Tasks for lmms-eval, comma-separated.')
    parser.add_argument('--limit', type=int, default=None, help='Limit for lmms-eval.')
    parser.add_argument('--batch_size', type=int, default=128, help='Batch size for lmms-eval.')
    args = parser.parse_args()
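
    # Imported after argument parsing; cli_evaluate wraps the lmms-eval harness.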
    from evaluation import cli_evaluate
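
    # Load the checkpoint and run evaluation on the GPU when one is available.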
    model = VisionLanguageModel.from_pretrained(args.checkpoint_path)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    model.eval()
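
    # Build the namespace of arguments passed to cli_evaluate.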
print("Running lmms-eval...")
eval_args = argparse.Namespace(
model=model,
tasks=args.tasks,
limit=args.limit,
batch_size=args.batch_size,
process_with_media=True,
device=device,
)
eval_results = cli_evaluate(eval_args)
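
    # Flatten the per-task results into scalar metrics keyed as "<task>_<metric>".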
    output_data = {
        'global_step': args.global_step,
        'results': {}
    }

    if eval_results is not None and "results" in eval_results[0]:
        print("Processing evaluation results.")
        for task_name, task_results in eval_results[0]["results"].items():
            for metric_name, metric_value in task_results.items():
                if isinstance(metric_value, (int, float)):
                    key = f"{task_name}_{metric_name.split(',')[0]}"
                    output_data['results'][key] = metric_value
    else:
        print("No evaluation results to process.")
    output_dir = os.path.join('eval_results', args.run_name)
    os.makedirs(output_dir, exist_ok=True)
    sanitized_tasks = args.tasks.replace("/", "_")
    output_path = os.path.join(output_dir, f'step_{args.global_step}_{sanitized_tasks}.json')
    with open(output_path, 'w') as f:
        json.dump(output_data, f, indent=4)
    print(f"Evaluation results for step {args.global_step} saved to {output_path}")


if __name__ == "__main__":
    main()