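"""The Poetry Workshop: a Gradio app that drafts a poem in a chosen form, persona,
and topic with a quantized Llama GGUF model (via llama-cpp-python), then runs a
short literary analysis of the draft on demand."""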
import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download
import time

MODEL_REPO = "Kezovic/iris-q4gguf-nitrallora"
MODEL_FILE = "Llama-3.2-1B-Instruct.Q4_K_M.gguf"
CONTEXT_WINDOW = 2048   # n_ctx: prompt + completion token budget per call
MAX_NEW_TOKENS = 400    # cap on the length of a poem draft
TEMPERATURE = 1.5       # high temperature for varied poem drafts; the analysis call uses 0.8

def create_llama_instance(model_path):
    """Build a llama.cpp model handle with the shared context and thread settings."""
    return Llama(
        model_path=model_path,
        n_ctx=CONTEXT_WINDOW,
        n_threads=2,
        verbose=False
    )
    
def load_llm():
    """Download the GGUF weights and create two model instances:
    one for drafting poems and one for analyzing them."""
    model_path = hf_hub_download(repo_id=MODEL_REPO, filename=MODEL_FILE)
    llm_generator = create_llama_instance(model_path)
    llm_analyst = create_llama_instance(model_path)

    return llm_generator, llm_analyst

llm_generator, llm_analyst = load_llm()

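# Persona name -> voice instruction injected into the poem prompt.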
persona_map = {
        "Grumpy Pirate": "You are a grumpy and annoyed pirate captain. Use salty nautical slang, complain about the sea and use 'Arrr' and 'matey'.",
        "Philosopher": "You are a philosopher. Use metaphors and reflective musings about the human condition.",
        "Ancient Wizard": "You are an ancient wizard. Speak in mystical tones. Use references to magic.",
        "Shakespearean Actor": "You are a Shakespearean actor. Use Early Modern English (thee, thou) and dramatic flair.",
        "Sarcastic Teenager": "You are a sarcastic teenager. Use dry humor."
}

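# Poem type -> form-specific writing constraint injected into the poem prompt.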
format_map = {
        "Free Verse": "Write in Free Verse about the topic. Focus on vivid imagery and emotion.",
        "Ballad": "Write a Ballad about the topic. It should tell a narrative story with a clear beginning, middle, and end. Use four-line stanzas (quatrains) and a simple rhyme scheme like A-B-C-B. Keep the language simple and musical.",
        "Ode": "Write an Ode about the topic. Use elevated, expressive language to praise or celebrate the subject. Focus on strong emotion, rich imagery, and admiration.",
        "Elegy": "Write an Elegy about the topic. Use a somber, reflective tone to mourn a loss or contemplate death. Focus on sorrow, remembrance, and emotional depth.",
        "Hymn": "Write a Hymn about the topic. Use a reverent, uplifting tone. Focus on praise, devotion, or spiritual reflection, with a rhythmic, chant-like flow.",
        "Epic": "Write an Epic about the topic. Use grand, dramatic language."
}

def build_poem_prompt(voice_instruction, constraint_instruction, format_type, topic):
    """Assemble the plain-text prompt: persona voice, form constraint, then the topic."""
    full_instruction = (
        "Instruction:\n"
        f"{voice_instruction}\n"
        f"{constraint_instruction} Your response should only contain the text of the {format_type} that you generate.\n"
        f"Write the {format_type} about this topic: '{topic}'.\n\n"
    )
    return full_instruction

def generate_poem(format_type, persona, topic, progress=gr.Progress()):
    """Draft a poem for the chosen form, persona, and topic with the generator model."""
    progress(0, desc="Consulting the Muse...")
    time.sleep(0.2)

    progress(0.2, desc=f"Summoning {persona} and drafting poem...")
    selected_voice = persona_map.get(persona, "You are a helpful assistant.")
    selected_constraint = format_map.get(format_type, "Write a poem.")

    full_poem_prompt = build_poem_prompt(selected_voice, selected_constraint, format_type, topic)
    output = llm_generator(
        prompt=full_poem_prompt,
        max_tokens=MAX_NEW_TOKENS,
        temperature=TEMPERATURE,
        min_p=0.1,  # min-p sampling floor keeps the high-temperature draft coherent
        stop=["STRICT FORMAT RULE:", "\n\n\n"],
        echo=False
    )

    poem_text = output['choices'][0]['text'].strip()
    progress(1.0, desc="Poem draft complete!")
    return poem_text

def analyze_poem(poem_text, progress=gr.Progress()):
    """Run a short literary analysis of the displayed poem with the analyst model."""
    progress(0, desc="Analyzing style for literary match...")
    time.sleep(0.2)

    poet_prompt = (
        "Please analyze the poem below in five sentences. Focus only on analysis.\n\n"
        f"POEM:\n{poem_text}\n"
    )

    output_poet = llm_analyst(
        prompt=poet_prompt,
        max_tokens=200,
        temperature=0.8,  # cooler sampling than the poem draft for a more focused critique
        min_p=0.1,
        echo=False
    )

    poet_suggestion = output_poet['choices'][0]['text'].strip()
    progress(1.0, desc="Analysis complete!")
    return poet_suggestion

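# Gradio UI: dropdowns and a topic box feed the draft button; a second button analyzes the result.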
with gr.Blocks(title="The Poetry Workshop", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# The Poetry Workshop - Collaborative Analyst")
    gr.Markdown("Generate a poem based on a topic and persona, then use the separate button to analyze the output.")
    
    with gr.Group():
        with gr.Row(equal_height=True):
            gr.Markdown("### I want to write a ")
            format_dropdown = gr.Dropdown(
                choices=["Free Verse", "Ballad", "Ode", "Elegy", "Hymn", "Epic"],
                value="Free Verse",
                label="Poem Type",
                show_label=False,
                container=False,
                scale=2
            )
            gr.Markdown("### in the style of ")
            persona_dropdown = gr.Dropdown(
                choices=["Grumpy Pirate", "Philosopher", "Ancient Wizard", "Shakespearean Actor", "Sarcastic Teenager"],
                value="Grumpy Pirate",
                label="Persona",
                show_label=False,
                container=False,
                scale=3
            )
        with gr.Row(equal_height=True):
            gr.Markdown("### about this topic: ")
            topic_input = gr.Textbox(
                placeholder="e.g., a dancing monkey, a school bully, a fair maiden",
                label="Topic",
                show_label=False,
                scale=5
            )
            generate_btn = gr.Button("Create Poem Draft", variant="primary", scale=1)
    
    gr.Markdown("---")
    
    analyze_btn = gr.Button("Analyze This Poem", variant="secondary")
    
    with gr.Row():
        output_display = gr.Textbox(
            label="Your Poem",
            lines=12,
            interactive=False,
            placeholder="Your masterpiece will appear here..."
        )
                
        poet_suggestion_display = gr.Textbox(
            label="Literary Analysis",
            lines=12,
            interactive=False,
            placeholder="Click 'Analyze This Poem' to run the LLM analysis."
        )

    gr.Markdown("---")
    
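    # Event wiring: the draft button runs the generator LLM; the analyze button
    # feeds whatever poem is in the output textbox to the analyst LLM.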
    generate_btn.click(
        fn=generate_poem,
        inputs=[format_dropdown, persona_dropdown, topic_input],
        outputs=[output_display] 
    )
    
    analyze_btn.click(
        fn=analyze_poem,
        inputs=[output_display], 
        outputs=poet_suggestion_display
    )

if __name__ == "__main__":
    demo.launch()