Commit · 7a05623
1 Parent(s): 68d0201
fix: Increase LLM max_tokens to 2048 to prevent truncation [v1.5.2]
- app.py +1 -1
- llm_explainer.py +2 -2
app.py
CHANGED
@@ -35,7 +35,7 @@ except ImportError as e:
 
 # --- CONFIG ---
 MODELS_DIR = Path("models")
-APP_VERSION = "1.5.
+APP_VERSION = "1.5.2"
 THRESHOLD_AUTO_FLAG = 0.53
 
 # Model registry
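For orientation, the config block presumably reads as follows after this commit. This is a sketch, not the file itself: only the hunk lines above are confirmed, `Path` is assumed to come from `pathlib`, and the comment on THRESHOLD_AUTO_FLAG is a guess at its role.

from pathlib import Path

# --- CONFIG ---
MODELS_DIR = Path("models")     # directory holding the app's model files
APP_VERSION = "1.5.2"           # bumped by this commit
THRESHOLD_AUTO_FLAG = 0.53      # assumed: score cutoff above which items are auto-flagged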
llm_explainer.py
CHANGED
@@ -104,7 +104,7 @@ def call_openai_format_api(config: dict, prompt: str) -> str:
                 "content": prompt
             }
         ],
-        "max_tokens":
+        "max_tokens": 2048,
         "temperature": 0.3
     }
 
@@ -131,7 +131,7 @@ def call_huggingface_api(config: dict, prompt: str) -> str:
     payload = {
         "inputs": prompt,
         "parameters": {
-            "max_new_tokens":
+            "max_new_tokens": 2048,
             "temperature": 0.3,
             "return_full_text": False
         }
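Taken together, the two hunks raise the generation cap on both LLM back ends: `max_tokens` for the OpenAI-format endpoint and its Hugging Face Inference API analogue `max_new_tokens`. Below is a minimal sketch of how these call paths might look around the changed payloads. It assumes `requests` plus `config` keys named `api_url`, `api_key`, and `model`, none of which appear in the diff, so treat everything outside the payload dicts as illustrative.

import requests

def call_openai_format_api(config: dict, prompt: str) -> str:
    # Sketch only: config keys and response handling are assumptions.
    # The payload mirrors the first hunk above.
    payload = {
        "model": config["model"],
        "messages": [
            {
                "role": "user",
                "content": prompt
            }
        ],
        "max_tokens": 2048,   # raised cap so long answers are not truncated
        "temperature": 0.3
    }
    resp = requests.post(
        config["api_url"],
        headers={"Authorization": f"Bearer {config['api_key']}"},
        json=payload,
        timeout=60,
    )
    resp.raise_for_status()
    # OpenAI-format chat responses carry the text here:
    return resp.json()["choices"][0]["message"]["content"]

def call_huggingface_api(config: dict, prompt: str) -> str:
    # Sketch only: endpoint and auth details are assumptions.
    # The payload mirrors the second hunk above.
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": 2048,   # HF Inference API counterpart of max_tokens
            "temperature": 0.3,
            "return_full_text": False
        }
    }
    resp = requests.post(
        config["api_url"],
        headers={"Authorization": f"Bearer {config['api_key']}"},
        json=payload,
        timeout=60,
    )
    resp.raise_for_status()
    # HF text-generation responses are a list of generated_text entries:
    return resp.json()[0]["generated_text"]

With both caps at 2048, the completion budget gives long explanations room to finish instead of being cut off mid-sentence, which is the truncation the commit message refers to.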