Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -9,3 +9,6 @@ manifest.ini
*.nvda-addon
.sconsign.dblite
/[0-9]*.[0-9]*.[0-9]*.json
venv
.env
.python-version
2 changes: 1 addition & 1 deletion addon/globalPlugins/WordBridge/configManager.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
"Google": _("Google"),
"OpenAI": _("OpenAI"),
"OpenRouter": _("OpenRouter"),
"claude-3-5-haiku-20241022": _("claude-3-5-haiku"),
"claude-haiku-4-5-20251001": _("claude-4-5-haiku"),
"claude-3-7-sonnet-20250219": _("claude-3.7-sonnet"),
"claude-sonnet-4-20250514": _("claude-4-sonnet"),
"deepseek-v3": _("deepseek-v3"),
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
{
"model": {
"model_name": "claude-haiku-4-5-20251001",
"provider": "Anthropic",
"llm_access_method": "personal_api_key",
"require_secret_key": false,
"template_name": {
"standard": "Standard_v1.json",
"lite": "Lite_v1.json"
},
"optional_guidance_enable": {
"keep_non_chinese_char": true,
"no_explanation": false
}
}
}
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"model": {
"model_name": "gemini-2.5-pro-preview-06-05",
"model_name": "gemini-2.5-flash",
"provider": "Google",
"llm_access_method": "personal_api_key",
"require_secret_key": false,
Expand All @@ -13,4 +13,4 @@
"no_explanation": false
}
}
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
{
"model": {
"model_name": "gemini-2.5-pro",
"provider": "Google",
"llm_access_method": "personal_api_key",
"require_secret_key": false,
"template_name": {
"standard": "Standard_v1.json",
"lite": "Lite_v1.json"
},
"optional_guidance_enable": {
"keep_non_chinese_char": true,
"no_explanation": false
}
}
}
5 changes: 5 additions & 0 deletions addon/globalPlugins/WordBridge/lib/llm/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Public API of the llm subpackage: re-export CostCalculator so callers can
# write `from .llm import CostCalculator` instead of reaching into the module.
from .cost_calculator import CostCalculator

__all__ = [
'CostCalculator',
]
44 changes: 44 additions & 0 deletions addon/globalPlugins/WordBridge/lib/llm/cost_calculator.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
"""
Cost Calculator for tracking LLM usage and costs.

This module provides utilities for:
- Tracking token usage from API responses
- Calculating costs based on model pricing
"""

from decimal import Decimal
from collections import defaultdict

class CostCalculator:
	"""Accumulate token usage and dollar cost from an LLM response history.

	``model_entry`` is one entry of the llm_models.json settings file:
	- ``usage_key``: name of the field in each response dict holding the
	  usage counters (e.g. "usage" or "usageMetadata"); absent/empty for
	  models with no usage metadata (e.g. free-tier models).
	- ``pricing``: maps each usage-counter name to its price (a numeric
	  string), plus a ``base_unit`` divisor (e.g. "1000000" for per-1M-token
	  pricing).
	"""

	def __init__(self, model_entry: dict):
		self._model_entry = model_entry
		self._pricing = model_entry.get("pricing", {})
		self._usage_key = model_entry.get("usage_key")

	def get_total_usage(self, response_history: list) -> dict:
		"""Sum the priced usage counters over all responses.

		Entries that are not dicts, or that lack the usage field or a given
		counter, are skipped (best-effort accumulation).

		Returns:
			A plain dict mapping each usage type to its summed count.
		"""
		total_usage = defaultdict(int)
		if not self._usage_key:
			# No usage metadata configured for this model.
			# Return a plain dict so the return type is consistent with the
			# normal path below (the original returned the defaultdict here).
			return dict(total_usage)

		for response in response_history:
			if isinstance(response, dict) and self._usage_key in response:
				usage = response[self._usage_key]
				for usage_type in self._pricing:
					if usage_type == "base_unit":
						continue  # pricing divisor, not a usage counter
					try:
						total_usage[usage_type] += usage[usage_type]
					except KeyError:
						pass  # this response lacks that counter

		return dict(total_usage)

	def get_total_cost(self, response_history: list) -> Decimal:
		"""Return the total cost in USD as a Decimal.

		Cost is sum(price[type] * count[type]) / base_unit over all usage
		types accumulated by get_total_usage().
		"""
		cost = Decimal("0")
		usages = self.get_total_usage(response_history)
		if not usages:
			return cost
		# Hoist the loop-invariant divisor; prices are stored as strings so
		# Decimal(...) keeps exact decimal arithmetic (no float rounding).
		base_unit = Decimal(str(self._pricing["base_unit"]))
		for key, value in usages.items():
			cost += Decimal(str(self._pricing[key])) * Decimal(str(value)) / base_unit
		return cost
169 changes: 15 additions & 154 deletions addon/globalPlugins/WordBridge/lib/typo_corrector.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from decimal import Decimal
from threading import Thread
from typing import Any, Dict, List, Tuple
from pathlib import Path

import json
import logging
Expand All @@ -18,6 +19,7 @@
from .utils import get_char_pinyin, has_chinese, has_simplified_chinese_char, has_traditional_chinese_char
from .utils import PUNCTUATION, SEPERATOR, is_chinese_character, strings_diff, text_segmentation
from .utils import find_correction_errors, review_correction_errors, get_segments_to_recorrect
from .llm import CostCalculator

import chinese_converter

Expand Down Expand Up @@ -54,124 +56,6 @@ class BaseTypoCorrector():
"google": GoogleProvider,
"openrouter": OpenrouterProvider,
}
MODEL = {
"claude-3-5-haiku-20241022": {
"usage_key": "usage",
"input_tokens": "0.8",
"cache_creation_input_tokens": "1",
"cache_read_input_tokens": "0.08",
"output_tokens": "4",
"base_unit": "1000000"
},
"claude-3-7-sonnet-20250219": {
"usage_key": "usage",
"input_tokens": "3",
"cache_creation_input_tokens": "3.75",
"cache_read_input_tokens": "0.3",
"output_tokens": "15",
"base_unit": "1000000"
},
"claude-sonnet-4-20250514": {
"usage_key": "usage",
"input_tokens": "3",
"cache_creation_input_tokens": "3.75",
"cache_read_input_tokens": "0.3",
"output_tokens": "15",
"base_unit": "1000000"
},
"deepseek-v3": {},
"deepseek-chat": {
"usage_key": "usage",
"prompt_cache_hit_tokens": "0.07",
"prompt_cache_miss_tokens": "0.27",
"completion_tokens": "1.1",
"base_unit": "1000000"
},
"deepseek-reasoner": {
"usage_key": "usage",
"prompt_cache_hit_tokens": "0.14",
"prompt_cache_miss_tokens": "0.55",
"completion_tokens": "2.19",
"base_unit": "1000000"
},
"deepseek/deepseek-chat:free": {},
"deepseek/deepseek-chat-v3-0324:free": {},
"deepseek/deepseek-r1-0528:free": {},
"deepseek/deepseek-r1-0528-qwen3-8b:free": {},
"gemini-2.5-flash-preview-05-20": {
"usage_key": "usageMetadata",
"promptTokenCount": "0.15",
"candidatesTokenCount": "0.6",
"base_unit": "1000000"
},
"gemini-2.5-pro-preview-06-05": {
"usage_key": "usageMetadata",
"promptTokenCount": "1.25",
"candidatesTokenCount": "10",
"base_unit": "1000000"
},
"gpt-4o-2024-08-06": {
"usage_key": "usage",
"prompt_tokens": "2.5",
"completion_tokens": "10",
"base_unit": "1000000"
},
"gpt-4o-mini-2024-07-18": {
"usage_key": "usage",
"prompt_tokens": "0.15",
"completion_tokens": "0.6",
"base_unit": "1000000"
},
"gpt-4.1-2025-04-14": {
"usage_key": "usage",
"prompt_tokens": "2",
"completion_tokens": "8",
"base_unit": "1000000"
},
"gpt-4.1-mini-2025-04-14": {
"usage_key": "usage",
"prompt_tokens": "0.4",
"completion_tokens": "1.6",
"base_unit": "1000000"
},
"gpt-4.1-nano-2025-04-14": {
"usage_key": "usage",
"prompt_tokens": "0.1",
"completion_tokens": "0.4",
"base_unit": "1000000"
},
"o4-mini-2025-04-16": {
"usage_key": "usage",
"prompt_tokens": "1.1",
"completion_tokens": "4.4",
"base_unit": "1000000"
},
"gpt-5-chat-latest": {
"usage_key": "usage",
"prompt_tokens": "1.25",
"completion_tokens": "10",
"base_unit": "1000000",
},
"gpt-5": {
"usage_key": "usage",
"prompt_tokens": "1.25",
"completion_tokens": "10",
"base_unit": "1000000",
},
"gpt-5-mini": {
"usage_key": "usage",
"prompt_tokens": "0.25",
"completion_tokens": "2",
"base_unit": "1000000",
},
"gpt-5-nano": {
"usage_key": "usage",
"prompt_tokens": "0.05",
"completion_tokens": "0.4",
"base_unit": "1000000",
},
"ernie-4.0-turbo-8k": {},
}

def __init__(
self,
Expand Down Expand Up @@ -210,6 +94,16 @@ def __init__(
self.question_string = ""
self.answer_string = ""

config_key = f"{model}&{provider}"
model_entry = self._load_model_config(config_key)
self._cost_calculator = CostCalculator(model_entry)

def _load_model_config(self, config_key: str) -> dict:
# Load this model's pricing/usage entry from the bundled llm_models.json.
# config_key is "<model>&<provider>" (built by the caller in __init__);
# returns {} when the key is absent so CostCalculator degrades gracefully.
config_path = Path(__file__).parent.parent / "setting" / "llm_models.json"
with open(config_path, "r", encoding="utf8") as f:
config = json.load(f)
return config.get(config_key, {})

def correct_text(self, text: str, batch_mode: bool = True, fake_corrected_text: str = None) -> Tuple:
"""
Analyze typos of text using self.segment_corrector. It also analyzes the difference between the original
Expand Down Expand Up @@ -342,43 +236,10 @@ def correct_segment_batch(self, input_text_list: list, previous_results_list: li
return output_text_list

def get_total_usage(self) -> Dict:
"""
Get the total usage of OpenAI model (in tokens)

Returns:
The total usage of OpenAI model (in tokens)
"""
usage_key = self.MODEL[self.model].get("usage_key")
total_usage = defaultdict(int)
if not usage_key:
return total_usage

for response in self.response_history:
if isinstance(response, dict) and usage_key in response:
for usage_type in set(self.MODEL[self.model].keys()):
if usage_type == "base_unit" or usage_type == "usage_key":
continue
try:
total_usage[usage_type] += response[usage_key][usage_type]
except KeyError:
pass

return total_usage

def get_total_cost(self) -> int:
"""
Get the total cost of provider model (in USD)

Returns:
The total cost of provider model (in USDs)
"""
price_info = self.MODEL[self.model]
cost = Decimal("0")
usages = self.get_total_usage()
for key, value in usages.items():
cost += Decimal(price_info[key]) * Decimal(str(value)) / Decimal(price_info["base_unit"])
return self._cost_calculator.get_total_usage(self.response_history)

return cost
def get_total_cost(self) -> Decimal:
# Total cost (USD) of all recorded responses, delegated to CostCalculator.
return self._cost_calculator.get_total_cost(self.response_history)

def _correct_segment_task(
self,
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"model": {
"model_name": "claude-3-5-haiku-20241022",
"model_name": "claude-sonnet-4-20250514",
"provider": "Anthropic",
"llm_access_method": "personal_api_key",
"require_secret_key": false,
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
{
"model": {
"model_name": "claude-haiku-4-5-20251001",
"provider": "Anthropic",
"llm_access_method": "personal_api_key",
"require_secret_key": false,
"template_name": {
"standard": "Standard_v1.json",
"lite": "Lite_v1.json"
},
"optional_guidance_enable": {
"keep_non_chinese_char": true,
"no_explanation": false
}
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
{
"model": {
"model_name": "deepseek-v3",
"provider": "Baidu",
"llm_access_method": "personal_api_key",
"require_secret_key": true,
"template_name": {
"standard": "Standard_v1.json",
"lite": "Lite_v1.json"
},
"optional_guidance_enable": {
"keep_non_chinese_char": false,
"no_explanation": true
}
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
{
"model": {
"model_name": "ernie-4.0-turbo-8k",
"provider": "Baidu",
"llm_access_method": "personal_api_key",
"require_secret_key": true,
"template_name": {
"standard": "Standard_v1.json",
"lite": "Lite_v1.json"
},
"optional_guidance_enable": {
"keep_non_chinese_char": false,
"no_explanation": true
}
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
{
"model": {
"model_name": "deepseek/deepseek-chat-v3-0324:free",
"provider": "OpenRouter",
"llm_access_method": "coseeing_relay",
"require_secret_key": true,
"template_name": {
"standard": "Standard_v1.json",
"lite": "Lite_v1.json"
},
"optional_guidance_enable": {
"keep_non_chinese_char": false,
"no_explanation": true
}
}
}
Loading
Loading