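# Gradio Space: generates images with the Yntec/YiffyPixar Stable Diffusion
# model on CPU. Images flagged by the safety checker are replaced with a
# placeholder image ("unsafe.png").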
import gradio as gr
# import torch
# from torch import autocast  # only needed for GPU inference

from PIL import Image

import os
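# The access token is read from the Space secret HF_TOKEN_SD; it is only
# needed if the model repository requires authentication.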
MY_SECRET_TOKEN = os.environ.get('HF_TOKEN_SD')

from diffusers import StableDiffusionPipeline
# from diffusers import StableDiffusionImg2ImgPipeline  # only needed for img2img

print("hello sylvain")

YOUR_TOKEN=MY_SECRET_TOKEN

device="cpu"

pipe = StableDiffusionPipeline.from_pretrained("Yntec/YiffyPixar", use_auth_token=YOUR_TOKEN)
pipe.to(device)
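# Everything runs on CPU in this Space, which is slow; on a machine with a
# CUDA GPU the pipeline could instead be moved with pipe.to("cuda").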

# Output component: a two-column image gallery (Gradio 3.x .style API).
gallery = gr.Gallery(label="Generated image", show_label=False, elem_id="gallery").style(grid=[2], height="auto")

def infer(prompt, steps=21):
    # image = pipe(prompt, init_image=init_image)["sample"][0]  # img2img variant (unused)
    # Run the pipeline; the output carries the generated images together with
    # the safety checker's per-image NSFW flags.
    images_list = pipe([prompt], num_inference_steps=steps)
    images = []
    safe_image = Image.open(r"unsafe.png")
    for i, image in enumerate(images_list.images):
        if images_list.nsfw_content_detected[i]:
            # Swap any flagged image for the placeholder.
            images.append(safe_image)
        else:
            images.append(image)
    return images
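
# Local smoke test (a hypothetical sketch, not run in the Space): infer()
# returns a list of PIL images, so it can be called directly, e.g.
#   images = infer("a cartoon fox", steps=20)
#   images[0].save("out.png")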

print("Great sylvain ! Everything is working fine !")

title="YiffyPixar"
description="It takes about 600 seconds to generate an image per user. If you like this model, you can like it at its original page! https://huggingface.co/Yntec/YiffyPixar" 

gr.Interface(fn=infer,
             inputs=["text",
                     gr.Slider(20, 21, value=21, step=1, label='Steps', visible=False)],
             outputs=gallery, title=title, description=description).queue(max_size=10).launch()