Spaces:
Running
Running
| import torch | |
| from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline | |
| import os | |
| import logging | |
# Module-level cache for the loaded text-classification pipeline.
# Populated lazily on the first successful call to load_emotion_classifier().
_classifier = None
def load_emotion_classifier():
    """Load (once) and return the Korean emotion-classification pipeline.

    The pipeline is cached in the module-level ``_classifier`` variable so
    that repeated calls are cheap and the model is downloaded only once.

    Returns:
        The cached ``transformers`` text-classification pipeline, or ``None``
        if the model/tokenizer could not be loaded (callers must handle the
        ``None`` sentinel).
    """
    global _classifier
    # Fast path: reuse the pipeline if it was already built.
    if _classifier is not None:
        return _classifier

    MODEL_ID = "taehoon222/korean-emotion-classifier-final"
    logging.info("Loading model '%s' from the Hugging Face Hub...", MODEL_ID)
    try:
        logging.info("Loading tokenizer...")
        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
        logging.info("Loading model...")
        model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
        logging.info("Hugging Face Hub model loaded successfully!")
    except Exception:
        # Download/auth/disk failures all land here; logging.exception keeps
        # the traceback, which logging.error(f"...{e}") was throwing away.
        logging.exception("Error while loading model '%s'", MODEL_ID)
        return None

    # transformers pipelines take a device index: 0 = first GPU, -1 = CPU.
    device = 0 if torch.cuda.is_available() else -1
    logging.info("Device set to use %s", "cuda (GPU)" if device == 0 else "cpu")

    # Cache the pipeline for all subsequent calls.
    _classifier = pipeline(
        "text-classification", model=model, tokenizer=tokenizer, device=device
    )
    return _classifier
def predict_emotion(text, top_k=3):
    """Classify the emotion of *text* and return the top-k predictions.

    Args:
        text: Input string to classify. Blank/whitespace-only input is
            rejected without loading the model.
        top_k: Number of top label/score entries to return (default 3).

    Returns:
        The pipeline's prediction list (label/score dicts), or ``[]`` when
        the input is blank, the classifier failed to load, or inference
        raised — callers never have to handle exceptions.
    """
    logging.info(
        "predict_emotion called. text length: %d, top_k=%s",
        len(text) if text else 0,
        top_k,
    )

    # Validate the input BEFORE triggering the (potentially expensive)
    # lazy model load — the original loaded the model even for empty text.
    if not text or not text.strip():
        logging.warning("Text to analyze is empty or whitespace-only.")
        return []

    classifier = load_emotion_classifier()
    if classifier is None:
        logging.error("The emotion-analysis engine is not ready.")
        return []

    try:
        # Log only a prefix so huge inputs don't flood the log.
        logging.info("Running classifier... text: %s...", text[:50])
        results = classifier(text, top_k=top_k)
        logging.info("Classification results (Top %s): %s", top_k, results)
        return results
    except Exception:
        # logging.exception preserves the traceback (logging.error did not).
        logging.exception("Error during emotion classification")
        return []