"""Gradio Space that serves the "Yntec/YiffyPixar" Stable Diffusion model on CPU.

Flat script: loads the pipeline once at import time, defines the ``infer``
handler, and launches a queued Gradio interface.
"""
import gradio as gr
# import torch
# from torch import autocast  # only for GPU
from PIL import Image
import os

# Hugging Face auth token, injected via the Space's secret store.
MY_SECRET_TOKEN = os.environ.get('HF_TOKEN_SD')

from diffusers import StableDiffusionPipeline
# from diffusers import StableDiffusionImg2ImgPipeline

print("hello sylvain")

YOUR_TOKEN = MY_SECRET_TOKEN
device = "cpu"  # CPU-only Space; no CUDA available

# Model weights download once at startup; every request reuses this pipeline.
pipe = StableDiffusionPipeline.from_pretrained("Yntec/YiffyPixar", use_auth_token=YOUR_TOKEN)
pipe.to(device)

gallery = gr.Gallery(label="Generated image", show_label=False, elem_id="gallery").style(grid=[2], height="auto")


def infer(prompt, steps=21):
    """Generate images for *prompt*, replacing NSFW-flagged outputs.

    Args:
        prompt: text prompt forwarded to the diffusion pipeline.
        steps: number of denoising steps (the UI slider is hidden and
            effectively pinned to 21).

    Returns:
        A list of PIL images. Any image the safety checker flags is
        replaced by the local ``unsafe.png`` placeholder.
    """
    # image = pipe(prompt, init_image=init_image)["sample"][0]
    images_list = pipe([prompt], num_inference_steps=steps)

    # When the safety checker is disabled, "nsfw_content_detected" is None;
    # treat that as "nothing flagged" instead of crashing on None[i].
    nsfw_flags = images_list["nsfw_content_detected"]
    if nsfw_flags is None:
        nsfw_flags = [False] * len(images_list["images"])

    safe_image = None  # lazily opened so a missing file only matters when needed
    images = []
    for image, is_nsfw in zip(images_list["images"], nsfw_flags):
        if is_nsfw:
            if safe_image is None:
                safe_image = Image.open(r"unsafe.png")
            images.append(safe_image)
        else:
            images.append(image)
    return images


print("Great sylvain ! Everything is working fine !")

title = "YiffyPixar"
description = "It takes about 600 seconds to generate an image per user. If you like this model, you can like it at its original page! https://huggingface.co/Yntec/YiffyPixar"

gr.Interface(fn=infer, inputs=["text", gr.Slider(20, 21, value = 21, step = 1, label = 'Steps', visible = False)], outputs=gallery,title=title,description=description).queue(max_size=10).launch(enable_queue=True)