from fastapi import FastAPI, Request
from huggingface_hub import InferenceClient
import os

app = FastAPI()

HF_TOKEN = os.getenv("HF_TOKEN")  # read the token from Secrets
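# Optional guard (an addition, not in the original file): fail fast when the
# secret is missing, so requests don't fail later with an authentication error.
if not HF_TOKEN:
    raise RuntimeError("HF_TOKEN is not set; add it under the Space's Secrets")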
# client = InferenceClient(token=HF_TOKEN, model="google/gemma-3-12b-it", provider="featherless-ai")
client = InferenceClient(token=HF_TOKEN, model="meta-llama/Llama-3.2-3B-Instruct")
# client = InferenceClient(token=HF_TOKEN, model="Qwen/Qwen3-4B-Instruct-2507")
# client = InferenceClient(token=HF_TOKEN, model="swiss-ai/Apertus-70B-Instruct-2509")

@app.get("/")
def root():
    return {"message": "Llama 3.2 API on CPU"}

@app.post("/generate")
async def generate(request: Request):
    body = await request.json()
    prompt = body.get("prompt", "")
    messages = [
        {"role": "user", "content": prompt}
    ]
    # Uses the default generation settings; chat_completion also accepts
    # max_tokens, temperature, etc. if the defaults need tuning.
    out = client.chat_completion(messages)
    print(out)  # log the raw completion for debugging
    response = out.choices[0].message.content
    return {"response": response}
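
# Local entry point: a minimal sketch, assuming uvicorn is installed and port
# 7860 (the Hugging Face Spaces default) is free; a Space normally starts the
# server from its Dockerfile instead.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)

# Example request against the running server:
#   curl -X POST http://localhost:7860/generate \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "Introduce yourself in one sentence."}'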