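"""AI DeepFake & Manipulation Detector (Gradio app).

Sends an uploaded image through three Hugging Face models via the Inference
API: two image classifiers (deepfake and AI-generated-content detection) and a
forgery-localization model, then reports a combined verdict and overlays the
predicted manipulation mask on the image.
"""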

import base64
import io
import os

import gradio as gr
import numpy as np
import requests
from huggingface_hub import InferenceClient
from PIL import Image

# Hugging Face Inference API client. HF_API_TOKEN is read from the environment
# (set it as a secret when deploying, e.g. in a Hugging Face Space).
HF_API_TOKEN = os.environ.get("HF_API_TOKEN")
client = InferenceClient(token=HF_API_TOKEN)

# Two image classifiers (deepfake / AI-generated detection) and one
# forgery-localization model used for the textual explanation and mask.
MODEL_1 = "prithivMLmods/deepfake-detector-model-v1"
MODEL_2 = "microsoft/dit-base-finetuned-aigc-detection"
MODEL_3 = "zhipeixu/fakeshield-v1-22b"


def overlay_mask(image, mask):
    """Tint the manipulated regions of `image` red using a binary `mask`."""
    if mask is None:
        return image
    # Resize the mask to the input image and binarize it (white = manipulated).
    mask = np.array(mask.convert("L").resize(image.size))
    mask = (mask > 128).astype(np.uint8) * 255
    # Paste a translucent red layer through the binary mask.
    overlay = Image.new("RGBA", image.size, (255, 0, 0, 100))
    img_rgba = image.convert("RGBA")
    img_rgba.paste(overlay, mask=Image.fromarray(mask))
    return img_rgba


def analyze_image(image):
    """Run the uploaded image through all three detectors and aggregate the results."""
    if image is None:
        return None, "No image provided", ""

    # Serialize the PIL image once; the same buffer is reused for every API call.
    buf = io.BytesIO()
    image.save(buf, format="PNG")
    buf.seek(0)

    # Model 1: deepfake classifier.
    try:
        out1 = client.image_classification(buf, model=MODEL_1)
        label1 = out1[0]["label"]
        score1 = round(out1[0]["score"] * 100, 2)
    except Exception:
        label1, score1 = "Error", 0
    buf.seek(0)

    # Model 2: AI-generated-content (AIGC) classifier.
    try:
        out2 = client.image_classification(buf, model=MODEL_2)
        label2 = out2[0]["label"]
        score2 = round(out2[0]["score"] * 100, 2)
    except Exception:
        label2, score2 = "Error", 0
    buf.seek(0)

    # Model 3: forgery localization. This model has no dedicated InferenceClient
    # helper, so call its Inference API endpoint directly; the response is
    # assumed to be JSON with "explanation" and "mask" fields (mask as a
    # base64-encoded PNG), matching what the rest of this function expects.
    try:
        resp = requests.post(
            f"https://api-inference.huggingface.co/models/{MODEL_3}",
            headers={"Authorization": f"Bearer {HF_API_TOKEN}"},
            data=buf.getvalue(),
            timeout=60,
        )
        out3 = resp.json()
        explanation = out3.get("explanation", "No manipulation detected")
        mask = out3.get("mask", None)
        if isinstance(mask, str):
            # Decode a base64-encoded mask into a PIL image for the overlay.
            mask = Image.open(io.BytesIO(base64.b64decode(mask)))
    except Exception:
        explanation, mask = "Error detecting forgery", None

    # Simple vote across the two classifiers.
    ai_votes = 0
    if "fake" in label1.lower() or "ai" in label1.lower():
        ai_votes += 1
    if "ai" in label2.lower() or "generated" in label2.lower():
        ai_votes += 1

    if ai_votes == 2:
        final_label = "AI-GENERATED"
    elif ai_votes == 1:
        final_label = "Possibly AI-GENERATED"
    else:
        final_label = "REAL IMAGE"

    output_image = overlay_mask(image, mask)

    return (
        output_image,
        final_label,
        f"Deepfake Model: {label1} ({score1}%)\n"
        f"AIGC Model: {label2} ({score2}%)\n"
        f"Forgery Detector: {explanation}",
    )


with gr.Blocks() as demo:
    gr.Markdown("<h2 style='text-align:center'>AI DeepFake & Manipulation Detector</h2>")

    with gr.Row():
        inp = gr.Image(type="pil", label="Upload Image")
        out_img = gr.Image(type="pil", label="Result Image with Mask Overlay")

    out_label = gr.Textbox(label="Final Verdict")
    out_text = gr.Textbox(label="Detection Result & Explanation", lines=8)

    btn = gr.Button("Analyze Image")
    # analyze_image returns (image, verdict, details), so wire it to three components.
    btn.click(fn=analyze_image, inputs=[inp], outputs=[out_img, out_label, out_text])

demo.launch() |
|
|
|