-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathembedding_utils.py
More file actions
53 lines (45 loc) · 2.05 KB
/
embedding_utils.py
File metadata and controls
53 lines (45 loc) · 2.05 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import torch
from transformers import AutoTokenizer, AutoModel
import gc
import os
def initialize_model_and_tokenizer(model_name='sentence-transformers/all-MiniLM-L6-v2'):
    """
    Load a tokenizer and embedding model, placed on the best available device.

    :param model_name: Name of the pre-trained Hugging Face model to use.
    :return: (tokenizer, model, device) — ``device`` is the torch.device the
             model was moved to.
    """
    # Prefer GPU when available; CPU otherwise.
    # This was necessary because of memory issues.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Forward proxy settings from the environment to the Hub download.
    # Drop unset entries so we never pass {"http": None, "https": None};
    # an empty dict falls back to None (library default).
    proxies = {
        scheme: url
        for scheme, url in (
            ("http", os.environ.get('HTTP_PROXY')),
            ("https", os.environ.get('HTTPS_PROXY')),
        )
        if url
    }
    tokenizer = AutoTokenizer.from_pretrained(model_name, proxies=proxies or None)
    model = AutoModel.from_pretrained(model_name, proxies=proxies or None).to(device)
    # Inference-only usage: disable dropout so embeddings are deterministic.
    model.eval()
    return tokenizer, model, device
def embed_text(text, tokenizer, model, device):
    """
    Embed the given text using the preloaded model and tokenizer.

    Mean-pools the model's last hidden state over the token dimension to
    produce one vector per input sequence.

    :param text: The input text to embed.
    :param tokenizer: The tokenizer used to encode the text.
    :param model: The pre-trained model that generates embeddings.
    :param device: The torch device to perform the computation on.
    :return: A numpy array of shape (batch, hidden_size) with the embedding.
    :raises RuntimeError: If tokenization or the forward pass fails; the
        original exception is chained as ``__cause__``.
    """
    try:
        # Tokenize; truncate to the 512-token limit and move tensors to device.
        inputs = tokenizer(text, return_tensors='pt', truncation=True,
                           padding=True, max_length=512).to(device)
        with torch.no_grad():
            # Forward pass without building an autograd graph.
            outputs = model(**inputs)
        # Mean-pool token embeddings into a single vector per sequence.
        embeddings = outputs.last_hidden_state.mean(dim=1).detach().cpu().numpy()
        # Free transient allocations; only touch the CUDA allocator when
        # actually running on a GPU.
        gc.collect()
        if device.type == 'cuda':
            torch.cuda.empty_cache()
        return embeddings
    except Exception as e:
        # Chain the original exception so the real traceback is preserved.
        raise RuntimeError(f"Error during text embedding: {e}") from e