Update app.py
Browse files
app.py
CHANGED
|
@@ -9,6 +9,7 @@ HF_TOKEN = os.getenv("HF_TOKEN") # đọc token từ Secrets
|
|
| 9 |
|
| 10 |
# Shared Hugging Face Inference client; HF_TOKEN comes from the Space secrets.
client = InferenceClient(model=MODEL_ID, token=HF_TOKEN)
|
| 11 |
|
|
|
|
| 12 |
@app.get("/")
def root():
    """Health-check endpoint confirming the service is up."""
    payload = {"message": "Gemma 3 API on CPU"}
    return payload
|
|
@@ -17,10 +18,13 @@ def root():
|
|
| 17 |
@app.post("/generate")
async def generate(request: Request):
    """Generate text from a JSON request body.

    Expected body keys (all optional):
      - prompt: str, the input text (default "")
      - max_new_tokens: int, generation length cap (default 128)
      - temperature: float, sampling temperature (default 0.7)

    Returns a JSON object {"text": <generated string>}.
    """
    body = await request.json()

    prompt = body.get("prompt", "")
    max_new_tokens = body.get("max_new_tokens", 128)
    temperature = body.get("temperature", 0.7)

    # Fix: the original parsed max_new_tokens and temperature from the
    # request but then hard-coded max_new_tokens=200 and ignored
    # temperature entirely — client settings had no effect.
    out = client.text_generation(
        prompt,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
    )

    return {"text": out}
|
|
|
|
| 9 |
|
| 10 |
# Inference client for MODEL_ID, authenticated with the HF_TOKEN secret.
client = InferenceClient(model=MODEL_ID, token=HF_TOKEN)
|
| 11 |
|
| 12 |
+
|
| 13 |
@app.get("/")
def root():
    """Simple liveness probe for the API."""
    return {"message": "Gemma 3 API on CPU"}
|
|
|
|
| 18 |
@app.post("/generate")
async def generate(request: Request):
    """Generate text from a JSON request body.

    Expected body keys (all optional):
      - prompt: str, the input text (default "")
      - max_new_tokens: int, generation length cap (default 128)
      - temperature: float, sampling temperature (default 0.7)

    Returns a JSON object {"text": <generated string>}.
    """
    body = await request.json()

    prompt = body.get("prompt", "")
    max_new_tokens = body.get("max_new_tokens", 128)
    temperature = body.get("temperature", 0.7)

    # SECURITY fix: removed `print(HF_TOKEN)` — printing the secret API
    # token writes it to the server logs, exposing the credential.
    # Bug fix: forward the parsed max_new_tokens/temperature instead of
    # hard-coding max_new_tokens=200 and discarding temperature.
    out = client.text_generation(
        prompt,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
    )

    return {"text": out}
|