"""
Download abstracts and PDFs to reconstruct the paper store for EvidenceSynthBench.

Usage:
    python setup_papers.py download                    # Download abstracts + OA PDFs
    python setup_papers.py download --abstracts-only   # Abstracts only, skip PDF download
    python setup_papers.py parse                       # Parse downloaded PDFs with GROBID
    python setup_papers.py parse --grobid-url URL      # Custom GROBID URL
"""
import argparse
import json
import re
import sys
import time
from pathlib import Path

import requests

from synthesisbench.resolve import (
    API_DELAY,
    batch_fetch_abstracts_and_titles,
    check_openalex_oa,
    check_pmc_availability,
)
from synthesisbench.fulltext import check_grobid_available
from synthesisbench.grobid_parser import GrobidSectionParser, GrobidParsingError

BASE_DIR = Path(__file__).resolve().parent
DATA_DIR = BASE_DIR / "data"
PAPERS_DIR = DATA_DIR / "paper_store"
PDFS_DIR = DATA_DIR / "pdfs"
SUBSETS_DIR = DATA_DIR / "subsets"
STUDIES_RIS = DATA_DIR / "studies.ris"
SUBSET_CONFIG = {
    "rob": {
        "manifest": "rob.json",
        "task_dir": DATA_DIR / "tasks" / "rob",
        "fulltext_required": True,
    },
    "eligibility": {
        "manifest": "eligibility.json",
        "task_dir": DATA_DIR / "tasks" / "eligibility",
        "fulltext_required": True,
    },
    "directness": {
        "manifest": "directness.json",
        "task_dir": DATA_DIR / "tasks" / "directness",
        "fulltext_required": False,
    },
}
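
# For reference, a subset manifest is expected to look roughly like this
# (illustrative sketch inferred from how the discovery code below reads it;
# real manifests may carry additional fields):
#
#     {"task_ids": ["rob_0001", "rob_0002"]}
#
# Each task ID names a JSON file under the subset's task_dir.
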
# ---------------------------------------------------------------------------
# PMID discovery
# ---------------------------------------------------------------------------
def discover_pmids_from_subsets() -> tuple[set[str], set[str], list[str]]:
    """Load public subset manifests and extract PMIDs from referenced task files.

    Returns:
        (all_pmids, fulltext_pmids, directness_task_ids)
    """
    all_pmids: set[str] = set()
    fulltext_pmids: set[str] = set()
    directness_task_ids: list[str] = []

    for task_type, config in SUBSET_CONFIG.items():
        manifest_path = SUBSETS_DIR / config["manifest"]
        manifest = json.loads(manifest_path.read_text())
        task_ids = manifest["task_ids"]

        if task_type == "directness":
            directness_task_ids = task_ids

        for task_id in task_ids:
            task_path = config["task_dir"] / f"{task_id}.json"
            if not task_path.exists():
                print(f" WARNING: task file not found: {task_path}")
                continue
            task = json.loads(task_path.read_text())

            if task_type == "directness":
                # Directness tasks bundle several studies, each with its own PMID.
                for study in task["studies"]:
                    pmid = study.get("pmid")
                    if pmid:
                        all_pmids.add(pmid)
            else:
                # RoB and eligibility tasks reference a single paper.
                pmid = task.get("pmid")
                if pmid:
                    all_pmids.add(pmid)
                    if config["fulltext_required"]:
                        fulltext_pmids.add(pmid)

    return all_pmids, fulltext_pmids, directness_task_ids

def parse_ris_pmids(ris_path: Path) -> set[str]:
    """Extract PMIDs from the AN field of a RIS file."""
    pmids: set[str] = set()
    for line in ris_path.read_text().splitlines():
        match = re.match(r"^AN\s+-\s+(\S+)", line)
        if match:
            pmids.add(match.group(1))
    return pmids
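
# A matching RIS accession-number line looks like the following (the PMID
# value is illustrative):
#
#     AN  - 12345678
#
# All other RIS tags (TI, AU, ...) are ignored by parse_ris_pmids.
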
def discover_and_validate() -> tuple[set[str], set[str], list[str]]:
    """Discover PMIDs from subsets and RIS, cross-validate, return results.

    Returns:
        (all_pmids, fulltext_pmids, directness_task_ids)
    """
    print("Discovering PMIDs from subset manifests...")
    subset_pmids, fulltext_pmids, directness_task_ids = discover_pmids_from_subsets()
    print(f" Found {len(subset_pmids)} unique PMIDs from task files")
    print(f" {len(fulltext_pmids)} require fulltext (RoB + eligibility)")
    print(f" {len(subset_pmids - fulltext_pmids)} abstract only (directness)")

    print("Reading PMIDs from studies.ris...")
    ris_pmids = parse_ris_pmids(STUDIES_RIS)
    print(f" Found {len(ris_pmids)} PMIDs in studies.ris")

    # Cross-validate
    in_subsets_only = subset_pmids - ris_pmids
    in_ris_only = ris_pmids - subset_pmids
    if in_subsets_only or in_ris_only:
        print("\nERROR: PMID mismatch between subsets and studies.ris!")
        if in_subsets_only:
            print(f" In subsets but not in studies.ris: {sorted(in_subsets_only)}")
        if in_ris_only:
            print(f" In studies.ris but not in subsets: {sorted(in_ris_only)}")
        sys.exit(1)
    print(f" Cross-validation passed: {len(subset_pmids)} PMIDs match\n")

    return subset_pmids, fulltext_pmids, directness_task_ids

# ---------------------------------------------------------------------------
# Paper store helpers
# ---------------------------------------------------------------------------
def read_paper(pmid: str) -> dict | None:
    """Read a paper store entry, or None if it doesn't exist."""
    path = PAPERS_DIR / f"{pmid}.json"
    if path.exists():
        return json.loads(path.read_text())
    return None


def write_paper(paper: dict) -> None:
    """Write a paper store entry."""
    PAPERS_DIR.mkdir(parents=True, exist_ok=True)
    path = PAPERS_DIR / f"{paper['pmid']}.json"
    path.write_text(json.dumps(paper, indent=2, ensure_ascii=False) + "\n")
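
# A complete paper store entry written by this script has this shape
# (field values illustrative; "fulltext" stays null until 'parse' has run
# GROBID over the PDF):
#
#     {
#         "pmid": "12345678",
#         "title": "Example trial title",
#         "doi": null,
#         "abstract": "Background: ...",
#         "fulltext": {"sections": ..., "source": "grobid"}
#     }
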
# ---------------------------------------------------------------------------
# download command
# ---------------------------------------------------------------------------
def cmd_download(args: argparse.Namespace) -> None:
    all_pmids, fulltext_pmids, directness_task_ids = discover_and_validate()

    # --- Step 1: Fetch abstracts from PubMed ---
    print("Step 1: Fetching abstracts from PubMed...")
    missing_abstract = []
    already_present = 0
    for pmid in sorted(all_pmids):
        paper = read_paper(pmid)
        if paper and paper.get("abstract"):
            already_present += 1
        else:
            missing_abstract.append(pmid)
    print(f" {already_present} already in paper store, {len(missing_abstract)} to fetch")

    if missing_abstract:
        fetched = batch_fetch_abstracts_and_titles(missing_abstract)
        fetch_ok = 0
        fetch_fail = 0
        for pmid in missing_abstract:
            info = fetched.get(pmid, {})
            abstract = info.get("abstract")
            title = info.get("pubmed_title") or ""
            if not abstract:
                fetch_fail += 1
                print(f" WARNING: no abstract returned for PMID {pmid}")
                continue
            existing = read_paper(pmid)
            if existing:
                existing["abstract"] = abstract
                existing["title"] = title or existing.get("title", "")
                write_paper(existing)
            else:
                write_paper({
                    "pmid": pmid,
                    "title": title,
                    "doi": None,
                    "abstract": abstract,
                    "fulltext": None,
                })
            fetch_ok += 1
        print(f" Fetched {fetch_ok}, failed {fetch_fail}\n")
    else:
        print(" Nothing to fetch\n")
    # --- Step 2: Backfill directness task abstracts ---
    print("Step 2: Backfilling directness task abstracts...")
    config = SUBSET_CONFIG["directness"]
    backfilled = 0
    skipped = 0
    for task_id in directness_task_ids:
        task_path = config["task_dir"] / f"{task_id}.json"
        if not task_path.exists():
            continue
        task = json.loads(task_path.read_text())
        needs_update = False
        for study in task["studies"]:
            if study.get("abstract"):
                continue
            pmid = study.get("pmid")
            if not pmid:
                continue
            paper = read_paper(pmid)
            if not paper:
                continue
            study["title"] = paper.get("title", "")
            study["abstract"] = paper.get("abstract", "")
            needs_update = True
        if needs_update:
            task_path.write_text(json.dumps(task, indent=2, ensure_ascii=False) + "\n")
            backfilled += 1
        else:
            skipped += 1
    print(f" Backfilled {backfilled} tasks, {skipped} already complete\n")
    # --- Step 3: Download OA PDFs ---
    if args.abstracts_only:
        print("Skipping PDF download (--abstracts-only)\n")
        print("Done. Abstracts ready for all tasks.")
        return

    print("Step 3: Downloading open-access PDFs...")
    PDFS_DIR.mkdir(parents=True, exist_ok=True)

    # Find PMIDs that need PDFs
    need_pdf = []
    pdf_already = 0
    for pmid in sorted(fulltext_pmids):
        if (PDFS_DIR / f"{pmid}.pdf").exists():
            pdf_already += 1
        else:
            need_pdf.append(pmid)
    print(f" {pdf_already} PDFs already on disk, {len(need_pdf)} to download")

    if not need_pdf:
        print(" Nothing to download\n")
        print("Done. Run 'parse' to extract fulltext with GROBID.")
        return

    # Check OA availability
    print(" Checking OA availability via OpenAlex...")
    oa_info = check_openalex_oa(need_pdf)
    print(" Checking PMC availability...")
    pmc_info = check_pmc_availability(need_pdf)

    # Download PDFs
    downloaded = 0
    missing: list[dict] = []
    for pmid in need_pdf:
        oa = oa_info.get(pmid, {})
        pmcid = pmc_info.get(pmid)

        # Build URL list in priority order: OpenAlex direct PDF link first,
        # then the OA landing URL, then the PMC article PDF as a fallback.
        urls = []
        if oa.get("pdf_url"):
            urls.append(oa["pdf_url"])
        if oa.get("oa_url"):
            urls.append(oa["oa_url"])
        if pmcid:
            urls.append(
                f"https://www.ncbi.nlm.nih.gov/pmc/articles/{pmcid}/pdf/"
            )

        if not urls:
            missing.append({"pmid": pmid, "reason": "not_oa"})
            continue

        pdf_ok = False
        for url in urls:
            try:
                time.sleep(API_DELAY)
                resp = requests.get(
                    url, timeout=30, headers={"User-Agent": "SynthesisBench/0.1"}
                )
                resp.raise_for_status()
                # Reject HTML landing pages: a real PDF starts with "%PDF".
                if not resp.content[:5].startswith(b"%PDF"):
                    continue  # try next URL
                (PDFS_DIR / f"{pmid}.pdf").write_bytes(resp.content)
                downloaded += 1
                pdf_ok = True
                print(f" [{downloaded}] {pmid}: OK")
                break
            except requests.RequestException:
                continue  # try next URL
        if not pdf_ok:
            missing.append({
                "pmid": pmid,
                "reason": "download_failed",
                "urls_tried": urls,
            })

    # Write missing.json so unavailable papers can be supplied by hand.
    missing_path = PDFS_DIR / "missing.json"
    missing_path.write_text(json.dumps(missing, indent=2) + "\n")

    total_needed = len(fulltext_pmids)
    total_have = pdf_already + downloaded
    print(f"\n Downloaded {downloaded}/{len(need_pdf)} PDFs")
    print(f" Total coverage: {total_have}/{total_needed} fulltext-required papers")
    if missing:
        print(f" {len(missing)} unavailable — see {missing_path}")
        print(" Place PDFs manually as data/pdfs/{pmid}.pdf and re-run 'parse'")
    print("\nDone. Run 'parse' to extract fulltext with GROBID.")
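
# data/pdfs/missing.json, written above, ends up as a list of records like
# the following (values illustrative):
#
#     [
#         {"pmid": "12345678", "reason": "not_oa"},
#         {"pmid": "23456789", "reason": "download_failed", "urls_tried": ["..."]}
#     ]
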
# ---------------------------------------------------------------------------
# parse command
# ---------------------------------------------------------------------------
def cmd_parse(args: argparse.Namespace) -> None:
    all_pmids, fulltext_pmids, _ = discover_and_validate()
    grobid_url = args.grobid_url

    # Find PMIDs with PDF but no fulltext
    need_parse = []
    already_parsed = 0
    no_pdf = 0
    for pmid in sorted(fulltext_pmids):
        pdf_path = PDFS_DIR / f"{pmid}.pdf"
        if not pdf_path.exists():
            no_pdf += 1
            continue
        paper = read_paper(pmid)
        if paper and paper.get("fulltext"):
            already_parsed += 1
            continue
        need_parse.append(pmid)

    print(f" {already_parsed} already have fulltext")
    print(f" {no_pdf} missing PDF (download first or place manually)")
    print(f" {len(need_parse)} to parse\n")

    if not need_parse:
        print(f"Nothing to parse. Coverage: {already_parsed}/{len(fulltext_pmids)} fulltext-required papers.")
        return

    # Check GROBID
    if not check_grobid_available(grobid_url):
        print(f"ERROR: GROBID not available at {grobid_url}")
        print()
        print("Start GROBID with Docker:")
        print("  docker run -d --name grobid -p 8070:8070 lfoppiano/grobid:0.8.0")
        print()
        print("Then re-run: python setup_papers.py parse")
        sys.exit(1)
    print(f"GROBID available at {grobid_url}")

    parser = GrobidSectionParser(grobid_url)
    parsed = 0
    failed = 0
    for i, pmid in enumerate(need_parse):
        pdf_path = PDFS_DIR / f"{pmid}.pdf"
        try:
            sections = parser.parse_pdf(str(pdf_path))
            paper = read_paper(pmid)
            if not paper:
                # PDF placed manually before the abstract was ever fetched.
                paper = {
                    "pmid": pmid,
                    "title": "",
                    "doi": None,
                    "abstract": None,
                    "fulltext": None,
                }
            paper["fulltext"] = {"sections": sections, "source": "grobid"}
            write_paper(paper)
            parsed += 1
            print(f" [{i + 1}/{len(need_parse)}] {pmid}: {len(sections)} sections")
        except GrobidParsingError as e:
            failed += 1
            print(f" [{i + 1}/{len(need_parse)}] {pmid}: FAILED ({e})")

    print(f"\nParsed {parsed}/{len(need_parse)}")
    if failed:
        print(f"{failed} failed")
    total_with_fulltext = already_parsed + parsed
    print(f"Coverage: {total_with_fulltext}/{len(fulltext_pmids)} fulltext-required papers")

# ---------------------------------------------------------------------------
# CLI
# ---------------------------------------------------------------------------
def main() -> None:
    parser = argparse.ArgumentParser(
        description="Set up the paper store for EvidenceSynthBench.",
    )
    subparsers = parser.add_subparsers(dest="command", required=True)

    # download
    dl = subparsers.add_parser(
        "download",
        help="Download abstracts and OA PDFs",
    )
    dl.add_argument(
        "--abstracts-only",
        action="store_true",
        help="Only fetch abstracts, skip PDF download",
    )

    # parse
    ps = subparsers.add_parser(
        "parse",
        help="Parse downloaded PDFs with GROBID",
    )
    ps.add_argument(
        "--grobid-url",
        default="http://localhost:8070",
        help="GROBID server URL (default: http://localhost:8070)",
    )

    args = parser.parse_args()
    if args.command == "download":
        cmd_download(args)
    elif args.command == "parse":
        cmd_parse(args)


if __name__ == "__main__":
    main()