# pyproject.toml — CorridorKey packaging, dependency, and tooling configuration.
# Package metadata and runtime dependencies (PEP 621).
[project]
name = "corridorkey"
version = "1.0.0"
description = "Neural network green screen keying for professional VFX pipelines"
readme = "README.md"
# NOTE(review): the <3.14 cap presumably tracks wheel availability for the
# pinned torch/torchvision builds — confirm before raising it.
requires-python = ">=3.10, <3.14"
license = { text = "CC-BY-NC-SA-4.0" }
authors = [{ name = "Corridor Digital" }]
dependencies = [
# Core inference
"torch==2.8.0",
"torchvision==0.23.0",
# timm resolves from a patched git fork — see [tool.uv.sources].
"timm==1.0.24",
"numpy",
"opencv-python",
"tqdm",
"setuptools",
# Triton fix for Windows
# There might still be issues though https://github.com/triton-lang/triton-windows?tab=readme-ov-file#windows-file-path-length-limit-260-causes-compilation-failure
"triton-windows==3.4.0.post21 ; sys_platform == 'win32'",
# GVM alpha hint generator
"diffusers",
"transformers",
"accelerate",
"peft",
"av",
"Pillow",
"PIMS",
"easydict",
"imageio",
"matplotlib",
# VideoMaMa alpha hint generator
"einops",
# BiRefNet alpha hint generator
"kornia",
# CLI tools (huggingface-hub is also a transitive dep, but must be direct
# so that uv installs the "hf" console-script entry point)
"huggingface-hub",
# CLI framework + terminal output
"typer>=0.12",
"rich>=13",
]
# Hardware-backend extras. They are declared mutually exclusive under
# [tool.uv] and route torch/torchvision through the matching
# [[tool.uv.index]] entries via [tool.uv.sources].
[project.optional-dependencies]
cuda = [
    "torch==2.8.0",
    "torchvision==0.23.0",
]
mlx = [
    # Gated to Python >= 3.11 by its environment marker.
    "corridorkey-mlx ; python_version >= '3.11'",
]
rocm = [
    "torch==2.8.0",
    "torchvision==0.23.0",
    # Restricted to x86-64 Linux by its environment marker.
    "pytorch-triton-rocm==3.4.0 ; platform_machine == 'x86_64' and sys_platform == 'linux'",
]
# Development-only dependency groups (PEP 735); installed by uv, not shipped.
[dependency-groups]
# pytest-env implements the "env" key consumed in [tool.pytest.ini_options];
# without the plugin installed, pytest warns about the unknown option and
# OPENCV_IO_ENABLE_OPENEXR is never set for the test run.
dev = ["pytest", "pytest-cov", "pytest-env", "ruff", "hypothesis"]
docs = ["zensical>=0.0.24"]
# Console entry point: the `corridorkey` command runs corridorkey_cli.main().
[project.scripts]
corridorkey = "corridorkey_cli:main"
# Build with hatchling (PEP 517/518).
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
# Packages bundled into the wheel; other root-level files are source-only.
[tool.hatch.build.targets.wheel]
packages = ["CorridorKeyModule", "gvm_core", "VideoMaMaInferenceModule"]
[tool.ruff]
# Target the minimum supported interpreter (requires-python is ">=3.10"),
# so lint fixes and pyupgrade-style rewrites never introduce 3.11+-only
# syntax into code that must still run on 3.10.
target-version = "py310"
line-length = 120
# Vendored third-party trees are not held to this project's style.
extend-exclude = ["gvm_core/", "VideoMaMaInferenceModule/"]
[tool.ruff.lint]
# pycodestyle (E/W), pyflakes (F), isort (I), flake8-bugbear (B).
select = ["E", "F", "W", "I", "B"]
# Formatter runs with default settings; the table is kept as an explicit opt-in.
[tool.ruff.format]
# Third-party code is excluded via the extend-exclude list in [tool.ruff].
[tool.pytest.ini_options]
testpaths = ["tests"]
# Custom markers must be registered here or pytest flags them as unknown.
markers = [
    "gpu: requires CUDA GPU (skipped in CI)",
    "slow: long-running test",
    "mlx: requires Apple Silicon with MLX installed",
]
# NOTE(review): "env" is implemented by the pytest-env plugin — it only takes
# effect when that plugin is installed in the test environment.
env = ["OPENCV_IO_ENABLE_OPENEXR=1"]
# Short tracebacks keep CI logs readable.
addopts = "--tb=short"
[tool.coverage.run]
# Measure branch coverage, not just statement coverage.
branch = true
# "." covers root-level modules (clip_manager.py, corridorkey_cli.py once PR #8 merges).
# Third-party and non-library code is excluded via omit.
source = ["CorridorKeyModule", "."]
omit = [
"gvm_core/*",
"VideoMaMaInferenceModule/*",
"tests/*",
"test_vram.py", # manual GPU smoke test, not part of the pytest suite
]
[tool.uv]
# Guard against transitive deps (diffusers, imageio, PIMS) silently pulling in
# opencv-python-headless, which conflicts with opencv-python at the file level
# (both install into the same cv2/ directory). If any future dep requests
# opencv-python-headless, uv resolution will fail explicitly rather than
# corrupting the environment.
constraint-dependencies = [
"opencv-python-headless==99999",
]
# The backend extras pin torch builds from different wheel indexes, so no two
# of them may ever be resolved into the same environment.
conflicts = [
[
{ extra = "cuda" },
{ extra = "mlx" },
{ extra = "rocm" },
],
]
# Backend-specific wheel indexes. "explicit" restricts each index to the
# packages explicitly pinned to it in [tool.uv.sources].
[[tool.uv.index]]
name = "pytorch"
url = "https://download.pytorch.org/whl/cu128" # cu128 — CUDA 12.6 wheels don't support RTX 5000-series GPUs
explicit = true
extra = "cuda"
[[tool.uv.index]]
name = "pytorch-rocm"
url = "https://download.pytorch.org/whl/rocm6.3"
explicit = true
extra = "rocm"
# Pin select packages to non-default sources (git forks, backend indexes).
[tool.uv.sources]
# Use Hiera fix in order to utilize the FlashAttention Kernel
timm = { git = "https://github.com/Raiden129/pytorch-image-models-fix", branch = "fix/hiera-flash-attention-global-4d" }
# torch/torchvision resolve from the matching backend index only when the
# corresponding extra is enabled.
torch = [
{ index = "pytorch", extra = "cuda" },
{ index = "pytorch-rocm", extra = "rocm" },
]
torchvision = [
{ index = "pytorch", extra = "cuda" },
{ index = "pytorch-rocm", extra = "rocm" },
]
pytorch-triton-rocm = { index = "pytorch-rocm", extra = "rocm" }
corridorkey-mlx = { git = "https://github.com/nikopueringer/corridorkey-mlx.git", extra = "mlx" }