-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdedup.py
More file actions
286 lines (257 loc) · 12.1 KB
/
dedup.py
File metadata and controls
286 lines (257 loc) · 12.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
import sys
import os
import hashlib
import argparse
import json
def chunk_reader(fobj, chunk_size=1024):
    """Yield successive chunks read from an open file object.

    Reads *chunk_size* bytes (or characters, for text-mode files) at a
    time and stops at end of file, so arbitrarily large files can be
    hashed without loading them fully into memory.
    """
    piece = fobj.read(chunk_size)
    while piece:
        yield piece
        piece = fobj.read(chunk_size)
def load_precedence_rules(path):
    """Load auto-deletion precedence rules from a JSON file.

    Parameters
    ----------
    path : str or None
        Path to a JSON file containing a list of rule dicts, each with
        'keep' and 'delete' path-prefix keys.

    Returns
    -------
    list of dict
        The rules, or [] when *path* is falsy, the file is unreadable
        or malformed, or its top-level value is not a list. Non-dict
        entries are dropped so match_precedence_rule can rely on dicts.
    """
    if not path:
        return []
    try:
        with open(path, "r") as f:
            rules = json.load(f)
    except (OSError, ValueError) as e:
        # OSError: missing/unreadable file; ValueError covers
        # json.JSONDecodeError (its subclass) for malformed JSON.
        print(f"Could not load precedence rules: {e}")
        return []
    if not isinstance(rules, list):
        return []
    # Each rule should be a dict with 'keep' and 'delete' keys.
    return [rule for rule in rules if isinstance(rule, dict)]
def match_precedence_rule(rules, path1, path2):
    """Find the first precedence rule that decides which duplicate to keep.

    Parameters
    ----------
    rules : list of dict
        Rules with 'keep' and 'delete' path-prefix keys.
    path1, path2 : str
        The two duplicate file paths.

    Returns
    -------
    tuple
        (keep_path, delete_path, rule) for the first rule whose 'keep'
        prefix contains one path and whose 'delete' prefix contains the
        other, else (None, None, None).
    """

    def _is_under(path, prefix):
        # Component-aware containment test. A plain startswith() would
        # wrongly let prefix 'a/keep' match sibling path 'a/keeper/f'.
        return path == prefix or path.startswith(prefix + os.sep)

    for rule in rules:
        keep_prefix = rule.get("keep")
        delete_prefix = rule.get("delete")
        if not (keep_prefix and delete_prefix):
            continue
        # Normalize for OS so rules written with either slash style match.
        kp = os.path.normpath(keep_prefix)
        dp = os.path.normpath(delete_prefix)
        p1 = os.path.normpath(path1)
        p2 = os.path.normpath(path2)
        if _is_under(p1, kp) and _is_under(p2, dp):
            return (path1, path2, rule)
        if _is_under(p2, kp) and _is_under(p1, dp):
            return (path2, path1, rule)
    return (None, None, None)
def load_dir_hash_record(dirpath, record_name):
    """Load the per-directory hash record stored at dirpath/record_name.

    Parameters
    ----------
    dirpath : str
        Directory whose record should be loaded.
    record_name : str
        Filename of the JSON record inside that directory.

    Returns
    -------
    dict
        Mapping of filename -> {"mtime": int, "size": int, "hash": bytes}.
        Hex-encoded hash strings are decoded back to bytes so they compare
        equal to freshly computed digests. Returns {} when the record is
        absent, unreadable, or malformed.
    """
    record_path = os.path.join(dirpath, record_name)
    if os.path.exists(record_path):
        try:
            with open(record_path, "r") as f:
                record = json.load(f)
            # Convert any hex string hashes back to bytes.
            for meta in record.values():
                if isinstance(meta, dict) and isinstance(meta.get("hash"), str):
                    try:
                        meta["hash"] = bytes.fromhex(meta["hash"])
                    except ValueError:
                        pass  # leave a malformed hex string untouched
            print(f"Loaded hash record from {record_path}")
            return record
        except (OSError, ValueError, AttributeError) as e:
            # OSError: unreadable file; ValueError: broken JSON;
            # AttributeError: top-level value was not a dict.
            print(f"Could not load hash record {record_path}: {e}")
    print(f"Creating new hash record for {dirpath}")
    return {}
def save_dir_hash_record(dirpath, record_name, record):
    """Write *record* to dirpath/record_name as pretty-printed JSON.

    Bytes digests in 'hash' fields are hex-encoded first so the record
    is JSON-serializable; write failures are reported, never raised.
    """
    record_path = os.path.join(dirpath, record_name)

    def _encode(meta):
        # Work on a copy so the caller's in-memory record keeps its bytes.
        encoded = dict(meta)
        if isinstance(encoded.get("hash"), bytes):
            encoded["hash"] = encoded["hash"].hex()
        return encoded

    serializable = {name: _encode(meta) for name, meta in record.items()}
    try:
        with open(record_path, "w") as fh:
            json.dump(serializable, fh, indent=2, sort_keys=True)
        print(f"Saved hash record to {record_path}")
    except Exception as e:
        print(f"Could not save hash record {record_path}: {e}")
def get_file_hash(full_path, hashfunc, mtime, size, dir_record, filename, dirpath=None, record_name=None, record_hashes=False):
    """Return the content digest for *full_path*, using the cache when valid.

    The cached entry in *dir_record* is reused only when it exists and its
    recorded mtime and size match the current values; otherwise the file is
    re-hashed with *hashfunc*, the record entry is refreshed, and — when
    record_hashes is set and a target directory/name are given — the record
    is written back to disk immediately so a later run can resume.
    """
    cached = dir_record.get(filename)
    if cached is None:
        stale_reason = "not in database"
    elif cached.get("mtime") != mtime:
        stale_reason = "mtime changed"
    elif cached.get("size") != size:
        stale_reason = "size changed"
    elif "hash" not in cached:
        stale_reason = "hash missing"
    else:
        stale_reason = None

    if stale_reason is None:
        print(f"Using cached hash for {full_path}")
        return cached["hash"]

    print(f"{stale_reason} -> Computing hash for {full_path}")
    digest_obj = hashfunc()
    with open(full_path, 'rb') as f:
        for piece in chunk_reader(f):
            digest_obj.update(piece)
    digest = digest_obj.digest()
    # Refresh the in-memory record for this file.
    dir_record[filename] = {"mtime": mtime, "size": size, "hash": digest}
    # Persist immediately after a fresh hash, if requested.
    if record_hashes and dirpath and record_name:
        save_dir_hash_record(dirpath, record_name, dir_record)
    return digest
def log_deletion(logfile, deleted, kept, reason):
    """Append one JSON line recording a deletion to *logfile*.

    Each line holds the deleted path, the kept duplicate, and the reason
    for the deletion. Logging is best-effort: failures are reported on
    stderr but never raised.
    """
    entry = {"deleted": deleted, "kept": kept, "reason": reason}
    try:
        with open(logfile, "a") as fh:
            fh.write(json.dumps(entry) + "\n")
    except Exception as e:
        print(f"Could not write to log file {logfile}: {e}", file=sys.stderr)
def _remove_and_log(victim, survivor, reason, log_file, label):
    """Best-effort delete of *victim*, optionally logging the deletion.

    *survivor* is the duplicate being kept; *label* is the 1/2 tag used
    in the printed duplicate listing so the messages match the pair.
    Narrowed from a bare except: only filesystem errors are swallowed,
    so Ctrl-C and other control-flow exceptions still propagate.
    """
    print(f"Deleting:\n [{label}] {victim}")
    try:
        os.remove(victim)
        if log_file:
            log_deletion(log_file, victim, survivor, reason)
    except OSError:
        print(f"Could not find file:\n {victim}\nContinuing...")


def check_for_duplicates(
    paths,
    delete=False,
    hashfunc=hashlib.sha256,
    precedence_rules=None,
    record_hashes=False,
    record_name=".dedup_hashes.json",
    min_filesize=1024,  # Ignore files smaller than 1 KB by default
    no_read_hashes=False,
    log_file=None
):
    """Walk *paths* and report (or delete) files with identical content.

    Two files are duplicates when both their digest (from *hashfunc*)
    and their size match. Resolution order for each duplicate pair:

    1. A matching precedence rule decides keep/delete automatically.
    2. Both files in the same directory: the newer (by mtime) is deleted.
    3. Otherwise the user is prompted interactively.

    With delete=False nothing is removed (dry run).

    Parameters
    ----------
    paths : iterable of str
        Root directories to walk recursively.
    delete : bool
        Actually delete duplicates; otherwise only report them.
    hashfunc : callable
        Hash constructor, e.g. hashlib.sha256.
    precedence_rules : list of dict or None
        Rules accepted by match_precedence_rule.
    record_hashes : bool
        Persist per-directory hash records for resumable runs.
    record_name : str
        Filename of the per-directory hash record (also ignored in scans).
    min_filesize : int
        Skip files smaller than this many bytes (0 disables the filter).
    no_read_hashes : bool
        Ignore existing hash record files even if present.
    log_file : str or None
        Append all deletions to this file as JSON lines.
    """
    hashes = {}  # (digest, size) -> path of the copy currently kept
    ignore_files = {".DS_Store", record_name}
    for path in paths:
        for dirpath, dirnames, filenames in os.walk(path):
            print(f"Checking directory: {dirpath}")
            # Load hash record for this directory if present, unless bypassed.
            dir_record = {} if no_read_hashes else load_dir_hash_record(dirpath, record_name)
            for filename in filenames:
                if filename in ignore_files:
                    continue
                full_path = os.path.join(dirpath, filename)
                try:
                    stat = os.stat(full_path)
                except OSError as e:
                    # File vanished or is unreadable; skip it.
                    print(f"Could not stat file {full_path}: {e}")
                    continue
                mtime = int(stat.st_mtime)
                size = stat.st_size
                if min_filesize > 0 and size < min_filesize:
                    # Ignore small files
                    continue
                file_hash = get_file_hash(
                    full_path, hashfunc, mtime, size, dir_record, filename,
                    dirpath, record_name, record_hashes
                )
                # Use (hash, size) as key for deduplication
                file_id = (file_hash, size)
                duplicate = hashes.get(file_id)
                if not duplicate:
                    hashes[file_id] = full_path
                    continue
                print(f"\nDuplicate found:\n [1] {full_path}\n [2] {duplicate}")
                keep, to_delete, rule_used = None, None, None
                if precedence_rules:
                    keep, to_delete, rule_used = match_precedence_rule(precedence_rules, full_path, duplicate)
                if keep and to_delete:
                    # A rule decided for us; no prompting needed.
                    print(f"Precedence rule: KEEP {keep}, DELETE {to_delete}")
                    if delete:
                        try:
                            os.remove(to_delete)
                            print(f"Deleted (by rule): {to_delete}")
                            if log_file:
                                log_deletion(log_file, to_delete, keep, f"precedence rule {rule_used}")
                        except OSError as e:
                            print(f"Could not delete {to_delete}: {e}")
                    else:
                        print(f"[DRY RUN] Would delete (by rule): {to_delete}")
                    hashes[file_id] = keep
                    continue
                if not delete:
                    print("[DRY RUN] Would prompt for deletion or keep both.")
                    continue
                path1, path2 = full_path, duplicate
                if os.path.dirname(path1) == os.path.dirname(path2):
                    print("Files are in the same directory: deleting newest")
                    if os.path.getmtime(path1) > os.path.getmtime(path2):
                        _remove_and_log(path1, path2, "deleted newest in the same directory", log_file, 1)
                        hashes[file_id] = path2
                    else:
                        _remove_and_log(path2, path1, "deleted newest in the same directory", log_file, 2)
                        hashes[file_id] = path1
                else:
                    selection = input("Which to delete? [1/2] (type anything else to keep both)> ")
                    if selection == "1":
                        _remove_and_log(path1, path2, "user choice", log_file, 1)
                        hashes[file_id] = path2
                    elif selection == "2":
                        _remove_and_log(path2, path1, "user choice", log_file, 2)
                        # Bug fix: re-point the registry at the surviving copy;
                        # previously it kept referencing the deleted path2.
                        hashes[file_id] = path1
                    else:
                        print("Not deleting either image")
def main():
    """Command-line entry point: parse arguments and run the duplicate scan."""
    parser = argparse.ArgumentParser(
        description="Find and optionally delete duplicate files in given directories."
    )
    parser.add_argument("paths", nargs="+", help="Directories to check for duplicates")
    parser.add_argument("-d", "--delete", action="store_true", help="Delete duplicates interactively or by rule")
    parser.add_argument("--precedence-rules", help="Path to precedence_rules.json for auto-deletion rules")
    parser.add_argument("--record-hashes", action="store_true", help="Store and update per-directory JSON hash records for resumability")
    parser.add_argument("--record-name", default=".dedup_hashes.json", help="Filename for per-directory hash record (default: .dedup_hashes.json)")
    parser.add_argument("--min-filesize", type=int, default=1024, help="Ignore files smaller than this many bytes (default: 1024, set to 0 to disable)")
    parser.add_argument("--no-read-hashes", action="store_true", help="Do not read from per-directory hash record files even if present")
    parser.add_argument("--log-file", default="dedup_deletions.log", help="Append all deletions to this log file as JSON lines (default: dedup_deletions.log)")
    parser.add_argument("--no-log-file", action="store_true", help="Do not log deletions to a file")
    args = parser.parse_args()

    if not args.record_hashes:
        print("Warning: --record-hashes not set, hashes will not be persisted for the next run.", file=sys.stderr)

    # Deletion logging is on by default; --no-log-file turns it off.
    if args.no_log_file:
        print("Warning: --no-log-file set, deletions will not be logged.", file=sys.stderr)
        log_file = None
    else:
        log_file = args.log_file

    rules = None
    if args.precedence_rules:
        rules = load_precedence_rules(args.precedence_rules)

    check_for_duplicates(
        args.paths,
        delete=args.delete,
        precedence_rules=rules,
        record_hashes=args.record_hashes,
        record_name=args.record_name,
        min_filesize=args.min_filesize,
        no_read_hashes=args.no_read_hashes,
        log_file=log_file
    )


if __name__ == "__main__":
    main()