main_instance_segmentation.py
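"""Entry point script: builds an InstanceSegmentation model and a PyTorch
Lightning Trainer from a Hydra config, then runs training or testing."""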
import glob
import logging
import os
import torch
from hashlib import md5
from uuid import uuid4
import hydra
from dotenv import load_dotenv
from omegaconf import DictConfig, OmegaConf
from trainer.trainer import InstanceSegmentation, RegularCheckpointing
from pytorch_lightning.callbacks import ModelCheckpoint
from utils.utils import (
    flatten_dict,
    load_baseline_model,
    load_checkpoint_with_missing_or_exsessive_keys,
    load_backbone_checkpoint_with_missing_or_exsessive_keys,
)
from pytorch_lightning import Trainer, seed_everything

def get_parameters(cfg: DictConfig):
    logger = logging.getLogger(__name__)
    load_dotenv(".env")

    # parsing input parameters
    seed_everything(cfg.general.seed)

    # getting basic configuration
    if cfg.general.get("gpus", None) is None:
        cfg.general.gpus = os.environ.get("CUDA_VISIBLE_DEVICES", None)
        if isinstance(cfg.general.gpus, str):
            # CUDA_VISIBLE_DEVICES is a comma-separated list of device ids
            cfg.general.gpus = len(cfg.general.gpus.split(","))
        elif cfg.general.gpus is None:
            cfg.general.gpus = torch.cuda.device_count()
    # cap the requested GPU count at what is actually available
    cfg.general.gpus = min(cfg.general.gpus, torch.cuda.device_count())

    loggers = []

    # cfg.general.experiment_id = "0"  # str(Repo("./").commit())[:8]
    # params = flatten_dict(OmegaConf.to_container(cfg, resolve=True))
    # create unique id for experiments that are run locally
    # unique_id = "_" + str(uuid4())[:4]
    # cfg.general.version = md5(str(params).encode("utf-8")).hexdigest()[:8] + unique_id

    if not os.path.exists(cfg.general.save_dir):
        os.makedirs(cfg.general.save_dir)
    elif cfg.general.resume:
        print("EXPERIMENT ALREADY EXISTS")
        if os.path.isfile(f"{cfg.general.save_dir}/last-epoch.ckpt"):
            # resume training from the most recent checkpoint
            cfg["trainer"]["resume_from_checkpoint"] = f"{cfg.general.save_dir}/last-epoch.ckpt"

    # instantiate every configured logger and log the flattened hyperparameters
    for log in cfg.logging:
        print(log)
        loggers.append(hydra.utils.instantiate(log))
        loggers[-1].log_hyperparams(
            flatten_dict(OmegaConf.to_container(cfg, resolve=True))
        )

    model = InstanceSegmentation(cfg)

    if cfg.general.backbone_checkpoint is not None:
        cfg, model = load_backbone_checkpoint_with_missing_or_exsessive_keys(cfg, model)
    if cfg.general.checkpoint is not None:
        if isinstance(cfg.general.checkpoint, str):
            print(f"Loading model weights from checkpoint: {cfg.general.checkpoint}")
            cfg, model = load_checkpoint_with_missing_or_exsessive_keys(cfg, model)
        else:
            # pick the checkpoint with the best mean_ap_50 score from the
            # experiment directory, falling back to the last checkpoint
            ckpt_paths = glob.glob(f"{cfg.general.save_dir}/*mean_ap_50*.ckpt")
            cfg.general.checkpoint = (
                sorted(ckpt_paths)[-1]
                if len(ckpt_paths) > 0
                else f"{cfg.general.save_dir}/last.ckpt"
            )
            print(f"Loading model weights from checkpoint: {cfg.general.checkpoint}")
            cfg, model = load_checkpoint_with_missing_or_exsessive_keys(cfg, model)

    logger.info(flatten_dict(OmegaConf.to_container(cfg, resolve=True)))
    return cfg, model, loggers

@hydra.main(config_path="conf", config_name="config_base_instance_segmentation.yaml")
def train(cfg: DictConfig):
    # hydra changes the working directory to its run dir; switch back to the project root
    os.chdir(hydra.utils.get_original_cwd())
    cfg, model, loggers = get_parameters(cfg)

    callbacks = []
    for cb in cfg.callbacks:
        callbacks.append(hydra.utils.instantiate(cb))
    callbacks.append(RegularCheckpointing())

    runner = Trainer(
        logger=loggers,
        gpus=cfg.general.gpus,
        callbacks=callbacks,
        weights_save_path=str(cfg.general.save_dir),
        **cfg.trainer,
    )
    runner.fit(model)

@hydra.main(config_path="conf", config_name="config_base_instance_segmentation.yaml")
def test(cfg: DictConfig):
    # hydra changes the working directory to its run dir; switch back to the project root
    os.chdir(hydra.utils.get_original_cwd())
    cfg, model, loggers = get_parameters(cfg)

    runner = Trainer(
        gpus=cfg.general.gpus,
        logger=loggers,
        weights_save_path=str(cfg.general.save_dir),
        **cfg.trainer,
    )
    runner.test(model)

@hydra.main(config_path="conf", config_name="config_base_instance_segmentation.yaml")
def main(cfg: DictConfig):
    # dispatch to training or testing based on the config flag
    if cfg["general"]["train_mode"]:
        train(cfg)
    else:
        test(cfg)


if __name__ == "__main__":
    main()
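
# Example invocations (a sketch: the override values shown are hypothetical,
# but the config keys come from the code above; hydra loads
# conf/config_base_instance_segmentation.yaml and applies the overrides):
#
#   python main_instance_segmentation.py general.train_mode=true
#   python main_instance_segmentation.py general.train_mode=false general.checkpoint=path/to/best.ckpt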