import os
import torch
import PIL.Image  # explicitly load the Image submodule; bare `import PIL` does not guarantee it is available
from torchvision import transforms
from facenet_pytorch import MTCNN
from huggingface_hub import hf_hub_download
import gradio as gr
# --- 1. Load the v0.4 model and initial setup for CPU ---
print("Loading ArcaneGAN v0.4 model for CPU...")
DEVICE = torch.device("cpu")
print(f"Forcing execution on device: {DEVICE}")
model_path = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.4", filename="ArcaneGANv0.4.jit")
print("Model downloaded. Loading to memory...")
# Load the model as-is (with its Half / float16 weights)
model = torch.jit.load(model_path, map_location=DEVICE).eval()
mtcnn = MTCNN(image_size=256, margin=80, device=DEVICE, post_process=False)
print("Model v0.4 loaded. Will attempt to run with Half-precision input on CPU.")
# --- 2. Helper functions (with a change in proc_pil_img) ---
def detect(img):
    # Detect face bounding boxes with MTCNN (returns None if no face is found).
    batch_boxes, _ = mtcnn.detect(img)
    return batch_boxes

def makeEven(_x):
    # Round up to the nearest even number.
    return _x if (_x % 2 == 0) else _x + 1
def scale(boxes, _img, max_res=1_500_000, target_face=256, fixed_ratio=0, max_upscale=2):
    # ... (unchanged) ...
    x, y = _img.size
    ratio = 1.0
    if boxes is not None and len(boxes) > 0:
        face_box = boxes[0]
        if face_box is not None:
            face_w = face_box[2] - face_box[0]
            face_h = face_box[3] - face_box[1]
            if face_w > 0 and face_h > 0:
                face_size = max(face_w, face_h)
                if face_size > 0:
                    ratio = target_face / face_size
                    ratio = min(ratio, max_upscale)
    if fixed_ratio > 0:
        ratio = fixed_ratio
    x = int(x * ratio)
    y = int(y * ratio)
    res = x * y
    if res > max_res:
        ratio_down = (res / max_res) ** 0.5
        x = int(x / ratio_down)
        y = int(y / ratio_down)
    size = (makeEven(x), makeEven(y))
    return _img.resize(size, PIL.Image.LANCZOS)

def scale_by_face_size(_img, target_face=256, max_res=1_500_000, max_upscale=2):
    boxes = detect(_img)
    return scale(boxes, _img, max_res=max_res, target_face=target_face, max_upscale=max_upscale)
# Standard ImageNet normalization statistics
means = [0.485, 0.456, 0.406]
stds = [0.229, 0.224, 0.225]
img_transforms = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(means, stds),
])
def tensor2im(var):
    # ... (unchanged) ...
    # Undo the ImageNet normalization and return an HxWxC tensor in the 0-255 range.
    var = var.cpu().float()
    t_stds_cpu = torch.tensor(stds)[:, None, None]
    t_means_cpu = torch.tensor(means)[:, None, None]
    return var.mul(t_stds_cpu).add(t_means_cpu).mul(255.).clamp(0, 255).permute(1, 2, 0)
# *** The key change is in this function ***
def proc_pil_img(input_image, model_to_use):
    # 1. Convert the image to a float32 tensor (as before)
    transformed_image = img_transforms(input_image)[None, ...].to(DEVICE)
    # 2. Now cast the input to float16 (Half) so it matches the model's dtype
    transformed_image = transformed_image.half()
    with torch.no_grad():
        result_image = model_to_use(transformed_image)[0]
        output_image = tensor2im(result_image)
        output_image = output_image.detach().cpu().numpy().astype('uint8')
    return PIL.Image.fromarray(output_image)
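# Hedged usage sketch (not part of the Space's Gradio flow): assuming a local portrait file
# named "sample.jpg" exists next to this script, the pipeline above can be exercised directly:
#
#     sample = PIL.Image.open("sample.jpg").convert("RGB")
#     stylized = proc_pil_img(scale_by_face_size(sample), model)
#     stylized.save("sample_arcane.jpg")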
# --- 3. Main processing function (unchanged) ---
def process(im):
    print("Processing image with v0.4 on CPU. This will be slow.")
    if im is None:
        return None
    im_scaled = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=2)
    res = proc_pil_img(im_scaled, model)
    print("Processing finished.")
    return res
# --- 4. Build and launch the Gradio UI (unchanged) ---
title = "ArcaneGAN v0.4 (CPU Version)"
description = "Gradio demo for ArcaneGAN v0.4, portrait to Arcane style. Note: This version runs on CPU and will be very slow."
article = "<div style='text-align: center;'>ArcaneGAN by <a href='https://twitter.com/devdef' target='_blank'>Alexander S</a> | <a href='https://github.com/Sxela/ArcaneGAN' target='_blank'>Github Repo</a> | <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_arcanegan' alt='visitor badge'></center></div>"
interface = gr.Interface(
    fn=process,
    inputs=gr.Image(type="pil", label="Input"),
    outputs=gr.Image(type="pil", label="Output"),
    title=title,
    description=description,
    article=article,
    examples=[['bill.png'], ['keanu.png'], ['will.jpeg']],
    cache_examples=False,
)
interface.launch()
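# Hedged note for local runs (assumption, not needed on Spaces): Gradio's launch() also accepts
# standard options such as server_name/server_port, e.g.
#
#     interface.launch(server_name="0.0.0.0", server_port=7860)
#
# to expose the demo on the local network.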