import json
import logging
import os
import time
from functools import wraps
from typing import Optional, Dict, Any, Callable, Tuple

import requests
from openai import OpenAI
from transformers import AutoTokenizer

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


def model_query_decorator(func: Callable) -> Callable:
    """
    Model query decorator that handles common pre-processing and retry logic.

    Reads max_tries and time_sleep dynamically from the wrapped instance and
    retries failed queries with exponential backoff.
    """
    @wraps(func)
    def wrapper(self, prompt: str) -> Tuple[str, Optional[str]]:
        if not prompt or not prompt.strip():
            logger.warning("Empty prompt, returning empty result")
            return "", None

        processed_prompt = self._truncate_prompt(prompt.strip())

        max_tries = getattr(self, 'max_tries', 5)
        time_sleep = getattr(self, 'time_sleep', 1.0)

        # Base pause before the first request to avoid hammering the endpoint.
        time.sleep(time_sleep)

        last_exception = None

        for attempt in range(1, max_tries + 1):
            try:
                logger.info(f"Attempt {attempt}/{max_tries}...")

                answer, thinking = func(self, processed_prompt)

                logger.info(f"Query succeeded on attempt {attempt}")

                return answer, thinking

            except KeyboardInterrupt:
                logger.info("Interrupted by user")
                raise
            except Exception as e:
                last_exception = e
                logger.warning(f"API error (attempt {attempt}/{max_tries}): {e}")

                if attempt < max_tries:
                    # Exponential backoff: time_sleep, 2*time_sleep, 4*time_sleep, ...
                    sleep_time = time_sleep * (2 ** (attempt - 1))
                    logger.info(f"Waiting {sleep_time:.1f} seconds before retrying...")
                    time.sleep(sleep_time)

        logger.error(f"All {max_tries} attempts failed, last error: {last_exception}")
        return "", None

    return wrapper


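# For reference, with the defaults used throughout this module (max_tries=5,
# time_sleep=1.0) a query that keeps failing is retried after waits of
# 1.0, 2.0, 4.0 and 8.0 seconds before the decorator gives up and returns ("", None).

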
class ModelManagerBase:
    """Base model manager"""

    def __init__(
        self,
        tokenizer_path: str,
        context_max_length: int,
        url: str,
        api_key: str,
        temperature: float,
        max_new_tokens: int,
        timeout: int,
        max_tries: int,
        time_sleep: float,
    ):
        if not os.path.exists(tokenizer_path):
            raise ValueError(f"tokenizer_path does not exist: {tokenizer_path}")
        if context_max_length <= 0:
            raise ValueError("context_max_length must be greater than 0")
        if max_tries <= 0:
            raise ValueError("max_tries must be greater than 0")

        self.tokenizer_path = tokenizer_path
        self.context_max_length = context_max_length
        self.url = url
        self.api_key = api_key
        self.temperature = temperature
        self.max_new_tokens = max_new_tokens
        self.timeout = timeout
        self.max_tries = max_tries
        self.time_sleep = time_sleep
        self.tokenizer = self._get_tokenizer()

    def _get_tokenizer(self) -> AutoTokenizer:
        """Get tokenizer"""
        try:
            return AutoTokenizer.from_pretrained(
                self.tokenizer_path,
                trust_remote_code=True
            )
        except Exception as e:
            logger.error(f"Failed to load tokenizer: {e}")
            raise

    def _truncate_prompt(self, prompt: str) -> str:
        """Truncate the prompt to context_max_length tokens, keeping its head and tail"""
        input_ids = self.tokenizer.encode(prompt)

        if len(input_ids) <= self.context_max_length:
            return prompt

        # Keep the first and last context_max_length // 2 tokens and drop the middle.
        half = self.context_max_length // 2
        truncated_input_ids = input_ids[:half] + input_ids[-half:]

        truncated_prompt = self.tokenizer.decode(
            truncated_input_ids,
            skip_special_tokens=True
        )

        return truncated_prompt

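    # Illustrative example (hypothetical numbers): with context_max_length = 8 and a
    # prompt that encodes to 12 tokens [t0, ..., t11], _truncate_prompt keeps
    # [t0, t1, t2, t3] + [t8, t9, t10, t11], discarding the middle of the prompt
    # rather than its head or tail.
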
    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query the LLM; subclasses implement the provider-specific request"""
        raise NotImplementedError("Subclass must implement this method")


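# A minimal sketch of how a new provider can be added on top of ModelManagerBase;
# the payload fields and response shape below are assumptions for illustration,
# not an existing provider in this module.
#
# class ModelManagerExample(ModelManagerBase):
#     @model_query_decorator
#     def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
#         payload = json.dumps({"prompt": processed_prompt,
#                               "max_tokens": self.max_new_tokens})
#         response = requests.post(self.url, headers={"api-key": self.api_key},
#                                  data=payload, timeout=self.timeout).json()
#         return response["text"], None

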
class ModelManagerMagistral(ModelManagerBase):
    """Magistral model manager"""

    def __init__(
        self,
        model_name: str,
        tokenizer_path: str = "",
        context_max_length: int = 120000,
        url: str = "http://127.0.0.1:8000/v1",
        api_key: str = "EMPTY",
        temperature: float = 0.7,
        max_new_tokens: int = 8192,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
        extra_body: Optional[Dict[str, Any]] = None,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.model_name = model_name
        self.extra_body = extra_body or {}
        self.client = self._create_client()
        self.system_prompt = """First draft your thinking process (inner monologue) until you arrive at a response. Format your response using Markdown, and use LaTeX for any mathematical equations. Write both your thoughts and the response in the same language as the input.\n\nYour thinking process must follow the template below:[THINK]Your thoughts or/and draft, like working through an exercise on scratch paper. Be as casual and as long as you want until you are confident to generate the response. Use the same language as the input.[/THINK]Here, provide a self-contained response."""

    def _create_client(self) -> OpenAI:
        """Create Magistral client"""
        try:
            return OpenAI(
                base_url=self.url,
                api_key=self.api_key
            )
        except Exception as e:
            logger.error(f"Failed to create OpenAI client: {e}")
            raise

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query the LLM - only handles the OpenAI-compatible request logic"""
        completion = self.client.chat.completions.create(
            model=self.model_name,
            messages=[
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": processed_prompt}
            ],
            temperature=self.temperature,
            extra_body=self.extra_body,
            max_tokens=self.max_new_tokens,
            timeout=self.timeout,
        )
        answer = completion.choices[0].message.content
        # reasoning_content is only present when the server returns a separate thinking field.
        thinking = getattr(completion.choices[0].message, "reasoning_content", None)
        return answer, thinking


class ModelManagerOpenAI(ModelManagerBase):
    """OpenAI model manager"""

    def __init__(
        self,
        model_name: str,
        tokenizer_path: str = "model/Tokenizers/qwen",
        context_max_length: int = 120000,
        url: str = "http://127.0.0.1:8000/v1",
        api_key: str = "EMPTY",
        temperature: float = 1.0,
        max_new_tokens: int = 8192,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
        extra_body: Optional[Dict[str, Any]] = None,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.model_name = model_name
        self.extra_body = extra_body or {}
        self.client = self._create_client()

    def _create_client(self) -> OpenAI:
        """Create OpenAI client"""
        try:
            return OpenAI(
                base_url=self.url,
                api_key=self.api_key
            )
        except Exception as e:
            logger.error(f"Failed to create OpenAI client: {e}")
            raise

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query the LLM - only handles the OpenAI-specific request logic"""
        completion = self.client.chat.completions.create(
            model=self.model_name,
            messages=[{"role": "user", "content": processed_prompt}],
            temperature=self.temperature,
            extra_body=self.extra_body,
            max_tokens=self.max_new_tokens,
            timeout=self.timeout,
        )
        answer = completion.choices[0].message.content
        # reasoning_content is only present when the server returns a separate thinking field.
        thinking = getattr(completion.choices[0].message, "reasoning_content", None)
        return answer, thinking


class ModelManagerGemini3(ModelManagerBase):
    """Gemini 3.0 model manager"""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/gemini",
        context_max_length: int = 1000000,
        url: str = "https://runway.devops.rednote.life/openai/google/v1:generateContent",
        api_key: str = "162420e2621c480d9f8ab1bb7b8c4c91",
        temperature: float = 1.0,
        max_new_tokens: int = 32768,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.headers = {
            'api-key': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query the LLM - only handles the Gemini-specific request logic"""
        payload = json.dumps({
            "contents": [
                {
                    "role": "user",
                    "parts": [
                        {
                            "text": processed_prompt
                        }
                    ]
                }
            ],
            "generationConfig": {
                "maxOutputTokens": self.max_new_tokens,
                "temperature": self.temperature,
                "thinkingConfig": {
                    "thinking_level": "high"
                }
            }
        })

        response = requests.post(self.url, headers=self.headers, data=payload,
                                 timeout=self.timeout).json()

        return response["candidates"][0]["content"]["parts"][0]["text"], None


class ModelManagerGemini25(ModelManagerBase):
    """Gemini 2.5 thinking model manager"""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/gemini",
        context_max_length: int = 1000000,
        url: str = "https://runway.devops.rednote.life/openai/google/v1:generateContent",
        api_key: str = "66c251052f44452a834ce83d0c7fd3ba",
        temperature: float = 1.0,
        max_new_tokens: int = 32768,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.headers = {
            'api-key': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query the LLM - only handles the Gemini-specific request logic"""
        payload = json.dumps({
            "contents": [
                {
                    "role": "user",
                    "parts": [
                        {
                            "text": processed_prompt
                        }
                    ]
                }
            ],
            "generationConfig": {
                "maxOutputTokens": self.max_new_tokens,
                "temperature": self.temperature,
                "thinkingConfig": {
                    "thinkingBudget": -1
                }
            }
        })

        response = requests.post(self.url, headers=self.headers, data=payload,
                                 timeout=self.timeout).json()

        return response["candidates"][0]["content"]["parts"][0]["text"], None


class ModelManagerGemini25FlashNonthinking(ModelManagerBase):
    """Gemini 2.5 Flash nonthinking model manager"""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/gemini",
        context_max_length: int = 1000000,
        url: str = "https://runway.devops.rednote.life/openai/google/v1:generateContent",
        api_key: str = "66c251052f44452a834ce83d0c7fd3ba",
        temperature: float = 1.0,
        max_new_tokens: int = 1024,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.headers = {
            'api-key': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query the LLM - only handles the Gemini-specific request logic"""
        payload = json.dumps({
            "contents": [
                {
                    "role": "user",
                    "parts": [
                        {
                            "text": processed_prompt
                        }
                    ]
                }
            ],
            "generationConfig": {
                "maxOutputTokens": self.max_new_tokens,
                "temperature": self.temperature,
                "thinkingConfig": {
                    "thinkingBudget": 0
                }
            }
        })

        response = requests.post(self.url, headers=self.headers, data=payload,
                                 timeout=self.timeout).json()

        return response["candidates"][0]["content"]["parts"][0]["text"], None


class ModelManagerGPT5(ModelManagerBase):
    """GPT-5 model manager"""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/gpt",
        context_max_length: int = 262144,
        url: str = "https://runway.devops.rednote.life/openai/chat/completions?api-version=2025-01-01-preview",
        api_key: str = "9a7403aa383e4a44a8c0f852710630e0",
        temperature: float = 1.0,
        max_new_tokens: int = 32768,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.headers = {
            'api-key': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query the LLM - only handles the GPT-5-specific request logic"""
        payload = json.dumps({
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": processed_prompt
                        }
                    ]
                }
            ],
            "max_completion_tokens": self.max_new_tokens,
            "temperature": self.temperature,
        })

        response = requests.post(self.url, headers=self.headers, data=payload,
                                 timeout=self.timeout).json()

        return response["choices"][0]["message"]["content"], None


class ModelManagerGPT4o(ModelManagerBase):
    """GPT-4o model manager"""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/gpt",
        context_max_length: int = 120000,
        url: str = "https://runway.devops.rednote.life/openai/chat/completions?api-version=2025-01-01-preview",
        api_key: str = "9d876c24a1d74e218e69339258db13a3",
        temperature: float = 1.0,
        max_new_tokens: int = 7168,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.headers = {
            'api-key': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query the LLM - only handles the GPT-4o-specific request logic"""
        payload = json.dumps({
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": processed_prompt
                        }
                    ]
                }
            ],
            "max_tokens": self.max_new_tokens,
            "temperature": self.temperature,
        })

        response = requests.post(self.url, headers=self.headers, data=payload,
                                 timeout=self.timeout).json()

        return response["choices"][0]["message"]["content"], None


class ModelManagerClaude4(ModelManagerBase):
    """Claude 4 thinking model manager"""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/claude",
        context_max_length: int = 1000000,
        url: str = "https://runway.devops.rednote.life/openai/bedrock_runtime/model/invoke",
        api_key: str = "899efa27c7c74654bb561242e1a0e423",
        temperature: float = 1.0,
        max_new_tokens: int = 32768,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.headers = {
            'token': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query the LLM - only handles the Claude-4-specific request logic"""
        payload = json.dumps({
            "anthropic_version": "bedrock-2023-05-31",
            "max_tokens": self.max_new_tokens,
            "temperature": self.temperature,
            "anthropic_beta": ["context-1m-2025-08-07"],
            "thinking": {
                "type": "enabled",
                "budget_tokens": self.max_new_tokens - 1024
            },
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": processed_prompt
                        }
                    ]
                }
            ]
        })

        response = requests.post(self.url, headers=self.headers, data=payload,
                                 timeout=self.timeout).json()

        # With thinking enabled, content[0] is the thinking block and content[1] the answer.
        return response["content"][1]["text"], response["content"][0]["thinking"]


class ModelManagerClaude4Nonthinking(ModelManagerBase):
    """Claude 4 nonthinking model manager"""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/claude",
        context_max_length: int = 1000000,
        url: str = "https://runway.devops.rednote.life/openai/bedrock_runtime/model/invoke",
        api_key: str = "899efa27c7c74654bb561242e1a0e423",
        temperature: float = 1.0,
        max_new_tokens: int = 1024,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.headers = {
            'token': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query the LLM - only handles the Claude-4-specific request logic"""
        payload = json.dumps({
            "anthropic_version": "bedrock-2023-05-31",
            "max_tokens": self.max_new_tokens,
            "temperature": self.temperature,
            "anthropic_beta": ["context-1m-2025-08-07"],
            "thinking": {
                "type": "disabled",
            },
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": processed_prompt
                        }
                    ]
                }
            ]
        })

        response = requests.post(self.url, headers=self.headers, data=payload,
                                 timeout=self.timeout).json()

        return response["content"][0]["text"], None


class ModelManagerClaude37(ModelManagerBase):
    """Claude 3.7 thinking model manager"""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/claude",
        context_max_length: int = 200000,
        url: str = "https://runway.devops.rednote.life/openai/bedrock_runtime/model/invoke",
        api_key: str = "ff15724dce4d4c1e95939efd2f40628f",
        temperature: float = 1.0,
        max_new_tokens: int = 32768,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.headers = {
            'token': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query the LLM - only handles the Claude-3.7-specific request logic"""
        payload = json.dumps({
            "anthropic_version": "bedrock-2023-05-31",
            "max_tokens": self.max_new_tokens,
            "temperature": self.temperature,
            "thinking": {
                "type": "enabled",
                "budget_tokens": self.max_new_tokens - 200
            },
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": processed_prompt
                        }
                    ]
                }
            ]
        })

        response = requests.post(self.url, headers=self.headers, data=payload,
                                 timeout=self.timeout).json()

        # With thinking enabled, content[0] is the thinking block and content[1] the answer.
        return response["content"][1]["text"], response["content"][0]["thinking"]


class ModelManagerClaude37Nonthinking(ModelManagerBase):
    """Claude 3.7 nonthinking model manager"""

    def __init__(
        self,
        tokenizer_path: str = "model/Tokenizers/claude",
        context_max_length: int = 200000,
        url: str = "https://runway.devops.rednote.life/openai/bedrock_runtime/model/invoke",
        api_key: str = "ff15724dce4d4c1e95939efd2f40628f",
        temperature: float = 1.0,
        max_new_tokens: int = 1024,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.headers = {
            'token': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query the LLM - only handles the Claude-3.7-specific request logic"""
        payload = json.dumps({
            "anthropic_version": "bedrock-2023-05-31",
            "max_tokens": self.max_new_tokens,
            "temperature": self.temperature,
            "thinking": {
                "type": "disabled",
            },
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": processed_prompt
                        }
                    ]
                }
            ]
        })

        response = requests.post(self.url, headers=self.headers, data=payload,
                                 timeout=self.timeout).json()

        return response["content"][0]["text"], None


class ModelManagerKimi(ModelManagerBase):
    """Kimi model manager"""

    def __init__(
        self,
        tokenizer_path: str = "/cpfs/user/chengfeng/huggingface/models/moonshotai/Kimi-K2-Instruct",
        context_max_length: int = 224000,
        url: str = "https://runway.devops.xiaohongshu.com/openai/moonshot/v1/chat/completions",
        api_key: str = "ea70f961e2e94024b0e8a2037ae9b477",
        temperature: float = 0.6,
        max_new_tokens: int = 32768,
        timeout: int = 1200,
        max_tries: int = 5,
        time_sleep: float = 1.0,
    ):
        super().__init__(tokenizer_path, context_max_length, url, api_key,
                         temperature, max_new_tokens, timeout, max_tries, time_sleep)
        self.headers = {
            'api-key': self.api_key,
            'Content-Type': 'application/json'
        }

    @model_query_decorator
    def query(self, processed_prompt: str) -> Tuple[str, Optional[str]]:
        """Query the LLM - only handles the Kimi-specific request logic"""
        payload = json.dumps({
            "model": "kimi-k2-0905-preview",
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": processed_prompt
                        }
                    ]
                }
            ],
            "max_tokens": self.max_new_tokens,
            "temperature": self.temperature,
            "timeout": self.timeout
        })

        # Pass the timeout to the HTTP request as well, so a hung connection is retried.
        response = requests.post(self.url, headers=self.headers, data=payload,
                                 timeout=self.timeout).json()

        return response["choices"][0]["message"]["content"], None


if __name__ == "__main__":
    model_manager = ModelManagerGemini3()
    answer, thinking = model_manager.query("Hello, how are you?")
    print("Answer:", answer)
    print("Thinking:", thinking)
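    # The other managers expose the same query interface; a minimal sketch
    # (the model name is an assumption and requires a matching OpenAI-compatible
    # endpoint to be running at the default url):
    #
    # openai_manager = ModelManagerOpenAI(model_name="qwen3-8b")
    # answer, thinking = openai_manager.query("Hello, how are you?")
    # print("Answer:", answer)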