-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathflush_gpu.py
More file actions
33 lines (24 loc) · 1.01 KB
/
flush_gpu.py
File metadata and controls
33 lines (24 loc) · 1.01 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
"""Download one weight shard of the gemma-3-12b-telugu model from the HF Hub.

The authentication token is read from the ``HF_TOKEN`` environment variable
rather than being hard-coded. When ``HF_TOKEN`` is unset, ``token=None`` is
passed, which lets ``huggingface_hub`` fall back to the credential cached by
``huggingface-cli login``.

SECURITY NOTE (review): the previous version of this file embedded a live
Hugging Face access token in source. Any token that has appeared in a commit
must be treated as compromised and revoked at
https://huggingface.co/settings/tokens.

NOTE: earlier scratch notes here covered freeing GPU memory after use —
``torch.cuda.empty_cache()`` + ``gc.collect()`` for PyTorch, or
``tensorflow.keras.backend.clear_session()`` for Keras — and loading the full
model via ``transformers.AutoModelForImageTextToText``. They were dead
commented-out code and have been removed; see the libraries' docs if needed.
"""
import os

from huggingface_hub import hf_hub_download

# Download a single safetensors shard (not the whole model); hf_hub_download
# returns the local cache path of the fetched file.
file_path = hf_hub_download(
    repo_id="bharathkumar1922001/gemma-3-12b-telugu",
    filename="model-00003-of-00005.safetensors",
    token=os.environ.get("HF_TOKEN"),  # None -> use cached login, if any
)