jblast94 commited on
Commit
ec0c03a
·
verified ·
1 Parent(s): 508150e

Upload src/streamlit_app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +161 -40
src/streamlit_app.py CHANGED
@@ -1,40 +1,161 @@
1
- import altair as alt
2
- import numpy as np
3
- import pandas as pd
4
- import streamlit as st
5
-
6
- """
7
- # Welcome to Streamlit!
8
-
9
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
10
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
11
- forums](https://discuss.streamlit.io).
12
-
13
- In the meantime, below is an example of what you can do with just a few lines of code:
14
- """
15
-
16
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
17
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
18
-
19
- indices = np.linspace(0, 1, num_points)
20
- theta = 2 * np.pi * num_turns * indices
21
- radius = indices
22
-
23
- x = radius * np.cos(theta)
24
- y = radius * np.sin(theta)
25
-
26
- df = pd.DataFrame({
27
- "x": x,
28
- "y": y,
29
- "idx": indices,
30
- "rand": np.random.randn(num_points),
31
- })
32
-
33
- st.altair_chart(alt.Chart(df, height=700, width=700)
34
- .mark_point(filled=True)
35
- .encode(
36
- x=alt.X("x", axis=None),
37
- y=alt.Y("y", axis=None),
38
- color=alt.Color("idx", legend=None, scale=alt.Scale()),
39
- size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
40
- ))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ === app.py ===
2
+ ```python
3
+ import gradio as gr
4
+ import time
5
+ import os
6
+
7
+ import requests
8
+ import tempfile
9
+ import json
10
+ from PIL import Image
11
+
12
+ def replace_character(source_image, target_video, use_lora=False, progress=gr.Progress(track_tqdm=True)):
13
+ """
14
+ Performs character replacement in video using Hugging Face inference and Wan2.2 model.
15
+
16
+ Args:
17
+ source_image (str): File path to the source character image.
18
+ target_video (str): File path to the target video.
19
+ use_lora (bool): Whether to use LoRA for fine-tuned character generation.
20
+ progress (gr.Progress): Gradio progress tracker to show processing status.
21
+
22
+ Returns:
23
+ str: File path of the processed video.
24
+ """
25
+ if source_image is None:
26
+ raise gr.Error("Please upload a character reference image.")
27
+ if target_video is None:
28
+ raise gr.Error("Please upload a target video.")
29
+
30
+ # Prepare the API request for Hugging Face inference
31
+ API_URL = "https://api-inference.huggingface.co/models/wan2.2/image-to-video"
32
+ headers = {"Authorization": f"Bearer {os.environ.get('HF_API_KEY', 'your_huggingface_token')}"}
33
+
34
+ # Process stages
35
+ stages = [
36
+ "Processing character reference image...",
37
+ "Uploading to Hugging Face inference...",
38
+ "Running Wan2.2 image-to-video generation...",
39
+ "Applying character replacement...",
40
+ "Rendering final video..."
41
+ ]
42
+
43
+ for stage in progress.tqdm(stages, desc="Processing video"):
44
+ time.sleep(1.5)
45
+
46
+ try:
47
+ # Prepare the payload
48
+ with open(source_image, "rb") as f:
49
+ image_data = f.read()
50
+
51
+ # Prepare additional parameters for LoRA if enabled
52
+ parameters = {}
53
+ if use_lora:
54
+ parameters["lora_scale"] = 1.0
55
+ parameters["use_lora"] = True
56
+
57
+ # Make the API request
58
+ response = requests.post(
59
+ API_URL,
60
+ headers=headers,
61
+ data=image_data,
62
+ params=parameters if parameters else None
63
+ )
64
+
65
+ if response.status_code == 200:
66
+ # Save the generated video to a temporary file
67
+ with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp_file:
68
+ tmp_file.write(response.content)
69
+ return tmp_file.name
70
+ else:
71
+ raise gr.Error(f"Hugging Face API error: {response.status_code} - {response.text}")
72
+
73
+ except Exception as e:
74
+ raise gr.Error(f"Error during character replacement: {str(e)}")
75
+
76
+ # Define the Gradio interface using gr.Blocks for a custom layout
77
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
78
+ # Header and description
79
+ gr.HTML(
80
+ """
81
+ <div style="text-align: center; max-width: 800px; margin: 0 auto;">
82
+ <h1 style="font-weight: 900; font-size: 2.5rem;">🎭 AI Video Character Replacement</h1>
83
+ <p style="margin-top: 1rem; font-size: 1.1rem; color: #4B5563;">
84
+ Provide a reference image of a character and a target video. The AI will replace a character in the video with the one from your image using Hugging Face Wan2.2 model.
85
+ <br>
86
+ <em>Set HF_API_KEY environment variable with your Hugging Face token.</em>
87
+ </p>
88
+ <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="text-decoration: none; color: #3B82F6; font-weight: 500;">
89
+ Built with anycoder
90
+ </a>
91
+ </div>
92
+ """
93
+ )
94
+
95
+ # Main UI layout with inputs and outputs
96
+ with gr.Row(variant="panel", equal_height=True):
97
+ # Input column
98
+ with gr.Column(scale=1, min_width=300):
99
+ gr.Markdown("### 1. Inputs")
100
+ source_image = gr.Image(
101
+ type="filepath",
102
+ label="Character Reference Image",
103
+ info="Upload a clear image of the character you want to insert."
104
+ )
105
+ target_video = gr.Video(
106
+ label="Target Video",
107
+ info="Upload the video where you want to replace a character."
108
+ )
109
+ use_lora = gr.Checkbox(
110
+ label="Use LoRA for fine-tuned generation",
111
+ info="Enable for better character consistency using LoRA adapters",
112
+ value=False
113
+ )
114
+ submit_btn = gr.Button("✨ Replace Character", variant="primary")
115
+
116
+ # Output column
117
+ with gr.Column(scale=1, min_width=300):
118
+ gr.Markdown("### 2. Result")
119
+ output_video = gr.Video(
120
+ label="Result Video",
121
+ interactive=False,
122
+ info="The processed video will appear here."
123
+ )
124
+
125
+ # Link the button to the processing function
126
+ submit_btn.click(
127
+ fn=replace_character,
128
+ inputs=[source_image, target_video, use_lora],
129
+ outputs=output_video
130
+ )
131
+
132
+ # Launch the application
133
+ if __name__ == "__main__":
134
+ demo.launch()
135
+ ```
136
+
137
+ === requirements.txt ===
138
+ gradio
139
+ requests
140
+ Pillow
141
+ numpy
142
+ torch
143
+ torchvision
144
+ torchaudio
145
+ git+https://github.com/huggingface/transformers
146
+ git+https://github.com/huggingface/diffusers
147
+ sentencepiece
148
+ accelerate
149
+ tokenizers
150
+ datasets
151
+ scipy
152
+ joblib
153
+ openpyxl
154
+ python-docx
155
+ PyPDF2
156
+ uvicorn
157
+ pydantic
158
+ matplotlib
159
+ pandas
160
+ opencv-python
161
+ scikit-learn