# llama / app.py — Hugging Face Space by quan1998 (commit ca3abd0, "Update app.py", 931 bytes)
# NOTE: the original paste included Hugging Face web-viewer chrome
# (raw / history / blame / contribute / delete links); condensed here into this header.
from fastapi import FastAPI, Request
from huggingface_hub import InferenceClient
import os
# FastAPI application exposing the Inference API as a tiny HTTP service.
app = FastAPI()
HF_TOKEN = os.getenv("HF_TOKEN")  # read the API token from the Space's Secrets
# Alternative models tried previously; kept commented out for quick switching.
# client = InferenceClient(token=HF_TOKEN, model="google/gemma-3-12b-it", provider="featherless-ai")
# Active backend: serverless Inference API client for Llama 3.2 3B Instruct.
client = InferenceClient(token=HF_TOKEN, model="meta-llama/Llama-3.2-3B-Instruct")
# client = InferenceClient(token=HF_TOKEN, model="Qwen/Qwen3-4B-Instruct-2507")
# client = InferenceClient(token=HF_TOKEN, model="swiss-ai/Apertus-70B-Instruct-2509")
@app.get("/")
def root():
    """Health-check endpoint: returns a static JSON message confirming the API is up."""
    # Fix: the message previously said "Gemma 3 API on CPU", but the active
    # client (see module setup) is meta-llama/Llama-3.2-3B-Instruct — the
    # Gemma configuration is commented out. Message updated to match.
    return {"message": "Llama 3.2 API on CPU"}
@app.post("/generate")
async def generate(request: Request):
    """Generate a chat completion for the request body's "prompt" field.

    Expects a JSON body like {"prompt": "<user text>"} and returns
    {"response": "<model reply>"}.
    """
    # Fix: a missing or malformed JSON body previously raised an unhandled
    # exception (HTTP 500). Fall back to an empty prompt instead, matching
    # the existing body.get("prompt", "") default behavior.
    try:
        body = await request.json()
    except Exception:
        body = {}
    prompt = body.get("prompt", "")
    messages = [
        {"role": "user", "content": prompt}
    ]
    # NOTE(review): chat_completion is a blocking network call inside an async
    # handler, so it stalls the event loop for the duration of the request.
    # Under real traffic, offload it (e.g. fastapi.concurrency.run_in_threadpool).
    out = client.chat_completion(messages)
    print(out)  # debug: dump the raw completion object to the Space logs
    response = out.choices[0].message.content
    return {"response": response}