Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
171 changes: 171 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,171 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

# TecoGAN-specific files
/data/ToS3/GT.zip
/data/ToS3
/data/Vid4
/pretrained_models/TecoGAN_BD_iter500000.pth
/pretrained_models/TecoGAN_2x_BD_REDS_iter500K.pth
/pretrained_models/TecoGAN_4x_BD_REDS_iter500K.pth
/pretrained_models/TecoGAN_4x_BD_Vimeo_iter500K.pth
/pretrained_models/TecoGAN_4x_BI_Vimeo_iter500K.pth
/results/
6 changes: 5 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,13 @@ This is a PyTorch reimplementation of **TecoGAN**: **Te**mporally **Co**herent *
## Dependencies
- Ubuntu >= 16.04
- NVIDIA GPU + CUDA
- CUDA Toolkit 11.3 is the latest version with which PyTorch is compatible as of June 2022.
- To see which GPU driver versions are compatible with which CUDA Toolkit version, see [CUDA Compatibility](https://docs.nvidia.com/deploy/cuda-compatibility/index.html).
- Python >= 3.7
- PyTorch >= 1.4.0
- Python packages: numpy, matplotlib, opencv-python, pyyaml, lmdb
- Python packages: See [requirements.txt](requirements.txt)
- A higher version of PyTorch and torchvision may work, but torch==1.7.1 (torchvision==0.8.2 seems compatible) is the lowest version available on pip for Python 3.9, and it still only works with CUDA compute capability >= 3.7 (cards such as GeForce TITAN and TITAN BLACK are 3.5). For a list of which GPUs are compatible with which compute capability, see [CUDA GPUs - Compute Capability](https://developer.nvidia.com/cuda-gpus)
- To see what compute capability the installed version of torch supports, run: `python -c "import torch; print(torch.cuda.get_arch_list())"`.
- (Optional) Matlab >= R2016b


Expand Down
Empty file added codes/__init__.py
Empty file.
2 changes: 1 addition & 1 deletion codes/data/paired_folder_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
import torch

from .base_dataset import BaseDataset
from utils.base_utils import retrieve_files
from codes.utils.base_utils import retrieve_files


class PairedFolderDataset(BaseDataset):
Expand Down
2 changes: 1 addition & 1 deletion codes/data/unpaired_folder_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
import torch

from .base_dataset import BaseDataset
from utils.base_utils import retrieve_files
from codes.utils.base_utils import retrieve_files


class UnpairedFolderDataset(BaseDataset):
Expand Down
15 changes: 9 additions & 6 deletions codes/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,10 @@

import torch

from data import create_dataloader
from models import define_model
from metrics import create_metric_calculator
from utils import dist_utils, base_utils, data_utils
from codes.data import create_dataloader
from codes.models import define_model
from codes.metrics import create_metric_calculator
from codes.utils import dist_utils, base_utils, data_utils


def train(opt):
Expand Down Expand Up @@ -224,7 +224,7 @@ def profile(opt, lr_size, test_speed=False):
msg += f'{"*"*40}\nResolution: {lr_size} -> {hr_size} ({scale}x SR)'

# create model
from models.networks import define_generator
from codes.models.networks import define_generator
net_G = define_generator(opt).to(device)
# base_utils.log_info(f'\n{net_G.__str__()}')

Expand Down Expand Up @@ -264,7 +264,7 @@ def profile(opt, lr_size, test_speed=False):
base_utils.log_info(msg)


if __name__ == '__main__':
def main():
# === parse arguments === #
args = base_utils.parse_agrs()

Expand All @@ -290,3 +290,6 @@ def profile(opt, lr_size, test_speed=False):

else:
raise ValueError(f'Unrecognized mode: {args.mode} (train|test|profile)')

if __name__ == '__main__':
main()
7 changes: 5 additions & 2 deletions codes/metrics/LPIPS/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,14 @@
from __future__ import print_function

import numpy as np
from skimage.measure import compare_ssim
try:
from skimage.measure import compare_ssim
except ImportError:
from skimage.metrics import structural_similarity as compare_ssim
import torch
from torch.autograd import Variable

from metrics.LPIPS.models import dist_model
from codes.metrics.LPIPS.models import dist_model


class PerceptualLoss(torch.nn.Module):
Expand Down
2 changes: 1 addition & 1 deletion codes/metrics/LPIPS/models/dist_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
from IPython import embed

from . import networks_basic as networks
import models as util
import codes.models as util

class DistModel(BaseModel):
def name(self):
Expand Down
2 changes: 1 addition & 1 deletion codes/metrics/LPIPS/models/networks_basic.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
from IPython import embed
from . import pretrained_networks as pn

import metrics.LPIPS.models as util
import codes.metrics.LPIPS.models as util

def spatial_average(in_tens, keepdim=True):
return in_tens.mean([2,3],keepdim=keepdim)
Expand Down
4 changes: 2 additions & 2 deletions codes/metrics/metric_calculator.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@
import torch
import torch.distributed as dist

from utils import base_utils, data_utils, net_utils
from utils.dist_utils import master_only
from codes.utils import base_utils, data_utils, net_utils
from codes.utils.dist_utils import master_only
from .LPIPS.models.dist_model import DistModel


Expand Down
4 changes: 2 additions & 2 deletions codes/models/base_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,8 @@
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel

from utils.data_utils import create_kernel, downsample_bd
from utils.dist_utils import master_only
from codes.utils.data_utils import create_kernel, downsample_bd
from codes.utils.dist_utils import master_only


class BaseModel():
Expand Down
16 changes: 8 additions & 8 deletions codes/models/networks/tecogan_nets.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,10 @@
import torch.nn.functional as F

from .base_nets import BaseSequenceGenerator, BaseSequenceDiscriminator
from utils.net_utils import space_to_depth, backward_warp, get_upsampling_func
from utils.net_utils import initialize_weights
from utils.data_utils import float32_to_uint8
from metrics.model_summary import register, parse_model_info
from codes.utils.net_utils import space_to_depth, backward_warp, get_upsampling_func
from codes.utils.net_utils import initialize_weights
from codes.utils.data_utils import float32_to_uint8
from codes.metrics.model_summary import register, parse_model_info


# ====================== generator modules ====================== #
Expand Down Expand Up @@ -236,8 +236,8 @@ def step(self, lr_curr, lr_prev, hr_prev):
lr_flow = self.fnet(lr_curr, lr_prev)

# pad if size is not a multiple of 8
pad_h = lr_curr.size(2) - lr_curr.size(2)//8*8
pad_w = lr_curr.size(3) - lr_curr.size(3)//8*8
pad_h = lr_curr.size(2) - lr_curr.size(2) // 8 * 8
pad_w = lr_curr.size(3) - lr_curr.size(3) // 8 * 8
lr_flow_pad = F.pad(lr_flow, (0, pad_w, 0, pad_h), 'reflect')

# upsample lr flow
Expand Down Expand Up @@ -303,8 +303,8 @@ def profile(self, lr_size, device):
gflops_dict['FNet'], params_dict['FNet'] = parse_model_info(self.fnet)

# profile module 2: sr module
pad_h = lr_curr.size(2) - lr_curr.size(2)//8*8
pad_w = lr_curr.size(3) - lr_curr.size(3)//8*8
pad_h = lr_curr.size(2) - lr_curr.size(2) // 8 * 8
pad_w = lr_curr.size(3) - lr_curr.size(3) // 8 * 8
lr_flow_pad = F.pad(lr_flow, (0, pad_w, 0, pad_h), 'reflect')
hr_flow = self.scale * self.upsample_func(lr_flow_pad)
hr_prev_warp = backward_warp(hr_prev, hr_flow)
Expand Down
2 changes: 1 addition & 1 deletion codes/models/vsr_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from .base_model import BaseModel
from .networks import define_generator
from .optim import define_criterion, define_lr_schedule
from utils import base_utils, net_utils, data_utils
from codes.utils import base_utils, net_utils, data_utils


class VSRModel(BaseModel):
Expand Down
4 changes: 2 additions & 2 deletions codes/models/vsrgan_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from .networks import define_generator, define_discriminator
from .networks.vgg_nets import VGGFeatureExtractor
from .optim import define_criterion, define_lr_schedule
from utils import base_utils, net_utils, dist_utils
from codes.utils import base_utils, net_utils, dist_utils


class VSRGANModel(VSRModel):
Expand Down Expand Up @@ -243,7 +243,7 @@ def train(self):
# ping-pong (pp) loss
if self.pp_crit is not None:
tempo_extent = self.opt['train']['tempo_extent']
hr_data_fw = hr_data[:, :tempo_extent - 1, ...] # -------->|
hr_data_fw = hr_data[:, :tempo_extent - 1, ...] # -------->|
hr_data_bw = hr_data[:, tempo_extent:, ...].flip(1) # <--------|

pp_w = self.opt['train']['pingpong_crit'].get('weight', 1)
Expand Down
5 changes: 4 additions & 1 deletion codes/official_metrics/LPIPSmodels/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,10 @@
import collections
import matplotlib.pyplot as plt
from scipy.ndimage.interpolation import zoom
from skimage.measure import compare_ssim
try:
from skimage.measure import compare_ssim
except ImportError:
from skimage.metrics import structural_similarity as compare_ssim
import torch
from IPython import embed
import cv2
Expand Down
2 changes: 1 addition & 1 deletion codes/official_metrics/evaluate.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@

keys = args.model.split('_')
assert keys[0] in ('TecoGAN', 'FRVSR')
assert keys[1] in ('BD', 'BI')
assert (keys[1] in ('BD', 'BI')) or (keys[2] in ('BD', 'BI'))

# set dirs
Vid4_GT_dir = 'data/Vid4/GT'
Expand Down
5 changes: 4 additions & 1 deletion codes/official_metrics/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,10 @@
import pandas as pd
from LPIPSmodels import util
import LPIPSmodels.dist_model as dm
from skimage.measure import compare_ssim
try:
from skimage.measure import compare_ssim
except ImportError:
from skimage.metrics import structural_similarity as compare_ssim

from absl import flags
flags.DEFINE_string('output', None, 'the path of output directory')
Expand Down
Loading