import gradio as gr
import torch
from deep_translator import GoogleTranslator
from langdetect import detect
from transformers import AutoTokenizer, AutoModelForSequenceClassification
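
# Pre-trained RoBERTa model fine-tuned for three-class tweet sentiment (negative/neutral/positive).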
MODEL_NAME = "cardiffnlp/twitter-roberta-base-sentiment"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
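
# Map the model's output class indices to human-readable labels.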
emotion_labels = {
    0: "Negative",
    1: "Neutral",
    2: "Positive",
}
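
# A single reusable translator; deep_translator detects the source language automatically.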
translator = GoogleTranslator(source="auto", target="en")


def analyze_sentiment(user_input):
    # langdetect raises an exception on empty or whitespace-only input, so guard against it.
    if not user_input or not user_input.strip():
        return "Unknown"

    # Detect the input language so translation only happens when needed.
    detected_language = detect(user_input)

    # The model expects English text, so translate anything else first.
    translated_text = translator.translate(user_input) if detected_language != "en" else user_input

    # Tokenize and run the model in inference mode (no gradient tracking).
    inputs = tokenizer(translated_text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)

    # Take the highest-scoring class and map it to its label.
    logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=-1).item()
    emotion = emotion_labels.get(predicted_class, "Unknown")
    return emotion
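

# Gradio UI: free-text input in any language, predicted sentiment shown as a label.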
iface = gr.Interface(
    fn=analyze_sentiment,
    inputs=gr.Textbox(lines=3, placeholder="Enter text in any language..."),
    outputs=gr.Label(label="Predicted Sentiment"),
    title="Multilingual Sentiment Analysis",
    description="Enter text in any language. The app detects the language, translates it to English, and predicts the sentiment (Positive/Neutral/Negative).",
)
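

# Launch a local Gradio server when the script is run directly.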
if __name__ == "__main__":
    iface.launch()