Update app.py

app.py CHANGED
@@ -134,51 +134,25 @@ class PromptRefiner:
                {
                    "role": "system",
                    "content": """You are a markdown formatting expert. Format your responses with proper spacing and structure following these rules:
- (… 20 deleted lines not shown in this view …)
- ### Subsection
- [blank line]
- Content
- [blank line]
-
- 3. List Formatting:
- [blank line]
- - List item 1
- - List item 2
- - List item 3
- [blank line]
-
- 4. JSON Output Structure:
- {
- "section_name": "
- Content paragraph 1
-
- Content paragraph 2
-
- - List item 1
- - List item 2
- "
- }
-
- Transform content while maintaining clear visual separation between elements."""
+
+ 1. Paragraph Spacing:
+ - Add TWO blank lines between major sections (##)
+ - Add ONE blank line between subsections (###)
+ - Add ONE blank line between paragraphs within sections
+ - Add ONE blank line before and after lists
+ - Add ONE blank line before and after code blocks
+ - Add ONE blank line before and after blockquotes
+
+ 2. Section Formatting:
+ # Title
+
+ ## Major Section
+
+ [blank line]
+ Content paragraph 1
+ [blank line]
+ Content paragraph 2
+ [blank line]"""
                },
                {
                    "role": "user",
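
To make the trimmed rules concrete, here is an illustrative sketch (not part of the commit) of a document laid out the way the new system prompt asks: two blank lines before a new `##` section, and single blank lines around paragraphs, subsections, and lists.

```python
# Illustrative only: a markdown document shaped per the new prompt's spacing rules.
EXAMPLE_MARKDOWN = """# Title

## Major Section

Content paragraph 1

Content paragraph 2

- List item 1
- List item 2

Closing paragraph


## Next Major Section

### Subsection

More content
"""

if __name__ == "__main__":
    print(EXAMPLE_MARKDOWN)
```
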
@@ -186,27 +160,27 @@ class PromptRefiner:
                }
            ]

-
-            response_stream = self.client.text_generation(
+            response = self.client.chat_completion(
                model=model,
                messages=messages,
                max_tokens=3000,
                temperature=0.8,
-                stream=True
+                stream=True  # Enable streaming in the API call
            )

-            # Initialize an empty string to
+            # Initialize an empty string to accumulate the response
            full_response = ""

-            # Process the
-            for
-            if
- (… 3 deleted lines not shown in this view …)
+            # Process the streaming response
+            for chunk in response:
+                if chunk.choices[0].delta.content is not None:
+                    full_response += chunk.choices[0].delta.content
+
+            # Return the complete response
+            return full_response.replace('\n\n', '\n').strip()

        except Exception as e:
-
+            return f"Error: {str(e)}"

class GradioInterface:
    def __init__(self, prompt_refiner: PromptRefiner,custom_css):
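
For reference, a minimal, self-contained sketch (not taken from the Space) of how a streamed `chat_completion` from `huggingface_hub.InferenceClient` is typically consumed. The function name, token, and model name below are placeholders; the newline cleanup mirrors the `replace('\n\n', '\n').strip()` step in the hunk above.

```python
from huggingface_hub import InferenceClient

# Placeholder token and model; substitute your own values.
client = InferenceClient(token="hf_xxx")
MODEL = "meta-llama/Meta-Llama-3-8B-Instruct"  # assumed example model, not from the Space

def stream_chat(prompt: str) -> str:
    """Accumulate a streamed chat completion into a single string."""
    messages = [
        {"role": "system", "content": "You are a markdown formatting expert."},
        {"role": "user", "content": prompt},
    ]
    response = client.chat_completion(
        model=MODEL,
        messages=messages,
        max_tokens=3000,
        temperature=0.8,
        stream=True,  # yields incremental chunks instead of one full message
    )

    full_response = ""
    for chunk in response:
        # Each chunk carries a text delta, exactly as the diff consumes it.
        delta = chunk.choices[0].delta.content
        if delta is not None:
            full_response += delta

    # Collapse doubled newlines, mirroring the updated method's cleanup step.
    return full_response.replace("\n\n", "\n").strip()
```
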
@@ -328,22 +302,13 @@ class GradioInterface:
                full_response
            )

- (… 7 deleted lines not shown in this view …)
-            # Return both outputs directly
-            return original_output, refined_output
-
-        except Exception as e:
-            # Return error messages for both outputs in case of failure
-            return f"Error: {str(e)}", f"Error: {str(e)}"
-
-
-
+    def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
+        try:
+            original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
+            refined_output = self.prompt_refiner.apply_prompt(refined_prompt, model)
+            return original_output, refined_output
+        except Exception as e:
+            return f"Error: {str(e)}", f"Error: {str(e)}"

    def launch(self, share=False):
        self.interface.launch(share=share)
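
The new `apply_prompts` helper returns a pair of values, which fits a Gradio button wired to two output components. Below is a hedged sketch of that wiring under assumed component names; none of these identifiers come from the repo, and `apply_prompt` is a stand-in for the real model call.

```python
import gradio as gr

def apply_prompt(prompt: str, model: str) -> str:
    # Stand-in for PromptRefiner.apply_prompt; the real app calls the model here.
    return f"[{model}] {prompt}"

def apply_prompts(original_prompt: str, refined_prompt: str, model: str):
    # Mirrors the method added in the diff: run both prompts, return both results.
    try:
        return apply_prompt(original_prompt, model), apply_prompt(refined_prompt, model)
    except Exception as e:
        return f"Error: {str(e)}", f"Error: {str(e)}"

with gr.Blocks() as demo:
    original_box = gr.Textbox(label="Original prompt")
    refined_box = gr.Textbox(label="Refined prompt")
    model_box = gr.Textbox(label="Model", value="example-model")
    apply_button = gr.Button("Apply prompts")
    original_out = gr.Textbox(label="Output of original prompt")
    refined_out = gr.Textbox(label="Output of refined prompt")

    # One click fills both outputs because apply_prompts returns a 2-tuple.
    apply_button.click(
        fn=apply_prompts,
        inputs=[original_box, refined_box, model_box],
        outputs=[original_out, refined_out],
    )

if __name__ == "__main__":
    demo.launch()
```
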