import json
import re

from huggingface_hub import InferenceClient
from huggingface_hub.errors import HfHubHTTPError

from variables import meta_prompts, prompt_refiner_model
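
# Note: variables.py is assumed (from how it is used below) to define
# `meta_prompts` as a dict mapping a meta-prompt name (e.g. "star") to a
# dict containing a "template" string, and `prompt_refiner_model` as a
# model id accepted by InferenceClient.chat_completion.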

class PromptRefiner:
    def __init__(self, api_token: str):
        # A generous 120-second timeout guards against slow model responses.
        self.client = InferenceClient(token=api_token, timeout=120)
        self.meta_prompts = meta_prompts
    def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
        """Refine a prompt with the chosen meta-prompt template.

        Returns a 4-tuple: (initial prompt evaluation, refined prompt,
        explanation of refinements, full parsed result dict).
        """
        try:
            # Fall back to the "star" meta-prompt if the choice is unknown.
            selected_meta_prompt = self.meta_prompts.get(
                meta_prompt_choice,
                self.meta_prompts["star"]
            )
            messages = [
                {
                    "role": "system",
                    "content": 'You are an expert at refining and extending prompts. Given a basic prompt, provide a more relevant and detailed prompt.'
                },
                {
                    "role": "user",
                    # Substitute the user's prompt into the template's placeholder.
                    "content": selected_meta_prompt["template"].replace("[Insert initial prompt here]", prompt)
                }
            ]
            response = self.client.chat_completion(
                model=prompt_refiner_model,
                messages=messages,
                max_tokens=3000,
                temperature=0.8
            )
            response_content = response.choices[0].message.content.strip()
            result = self._parse_response(response_content)
            return (
                result.get('initial_prompt_evaluation', ''),
                result.get('refined_prompt', ''),
                result.get('explanation_of_refinements', ''),
                result
            )
        except HfHubHTTPError:
            # The Hub returned an HTTP error, typically when the model is
            # overloaded or the request timed out.
            return (
                "Error: Model timeout. Please try again later.",
                "The selected model is currently experiencing high traffic.",
                "The selected model is currently experiencing high traffic.",
                {}
            )
        except Exception as e:
            return (
                f"Error: {str(e)}",
                "",
                "An unexpected error occurred.",
                {}
            )
    def _parse_response(self, response_content: str) -> dict:
        """Extract the structured result from the model's raw response."""
        try:
            # Preferred path: the model wrapped its JSON answer in <json> tags.
            json_match = re.search(r'<json>\s*(.*?)\s*</json>', response_content, re.DOTALL)
            if json_match:
                json_str = json_match.group(1)
                # Collapse newlines, then round-trip the payload as a JSON
                # string literal so embedded escape sequences are normalized
                # before the final parse.
                json_str = re.sub(r'\n\s*', ' ', json_str)
                json_str = json_str.replace('"', '\\"')
                json_output = json.loads(f'"{json_str}"')
                if isinstance(json_output, str):
                    json_output = json.loads(json_output)
                # Un-escape any quotes that survived inside string values.
                output = {
                    key: value.replace('\\"', '"') if isinstance(value, str) else value
                    for key, value in json_output.items()
                }
                output['response_content'] = json_output
                return output

            # Fallback: no <json> tags, so pull each expected field out of
            # the raw text with a regex.
            output = {}
            for key in ["initial_prompt_evaluation", "refined_prompt", "explanation_of_refinements"]:
                pattern = rf'"{key}":\s*"(.*?)"(?:,|\}})'
                match = re.search(pattern, response_content, re.DOTALL)
                output[key] = match.group(1).replace('\\n', '\n').replace('\\"', '"') if match else ""
            output['response_content'] = response_content
            return output
        except (json.JSONDecodeError, ValueError) as e:
            print(f"Error parsing response: {e}")
            print(f"Raw content: {response_content}")
            return {
                "initial_prompt_evaluation": "Error parsing response",
                "refined_prompt": "",
                "explanation_of_refinements": str(e),
                "response_content": str(e)
            }
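
    # For reference, the happy path above expects model output shaped like
    # the following (a hypothetical, illustrative payload; real model output
    # varies):
    #
    #   <json>
    #   {
    #       "initial_prompt_evaluation": "The prompt is clear but lacks detail...",
    #       "refined_prompt": "Write a 12-line poem about the sea that...",
    #       "explanation_of_refinements": "Added length, tone, and imagery constraints."
    #   }
    #   </json>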
    def apply_prompt(self, prompt: str, model: str) -> str:
        """Run a prompt against the chosen model and return the full streamed reply."""
        try:
            messages = [
                {
                    "role": "system",
                    "content": """You are a markdown formatting expert. Format your responses with proper spacing and structure following these rules:
1. Paragraph Spacing:
- Add TWO blank lines between major sections (##)
- Add ONE blank line between subsections (###)
- Add ONE blank line between paragraphs within sections
- Add ONE blank line before and after lists
- Add ONE blank line before and after code blocks
- Add ONE blank line before and after blockquotes
2. Section Formatting:
# Title
## Major Section
[blank line]
Content paragraph 1
[blank line]
Content paragraph 2
[blank line]"""
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ]
            response = self.client.chat_completion(
                model=model,
                messages=messages,
                max_tokens=3000,
                temperature=0.8,
                stream=True
            )
            # Accumulate the streamed chunks into a single string.
            full_response = ""
            for chunk in response:
                if chunk.choices[0].delta.content is not None:
                    full_response += chunk.choices[0].delta.content
            # Collapse doubled newlines before returning the final text.
            return full_response.replace('\n\n', '\n').strip()
        except Exception as e:
            return f"Error: {str(e)}"