use gpt-4o-mini
app.py CHANGED
@@ -646,7 +646,7 @@ with gr.Blocks() as app:
 
     def guardrail_check_se_relevance(user_prompt):
         """
-        Use
+        Use gpt-4o-mini to check if the user_prompt is SE-related.
         Return True if it is SE-related, otherwise False.
         """
         # Example instructions for classification — adjust to your needs
@@ -663,10 +663,10 @@ with gr.Blocks() as app:
         try:
             # Make the chat completion call
            response = openai_client.chat.completions.create(
-                model="gpt-
+                model="gpt-4o-mini", messages=[system_message, user_message]
             )
             classification = response.choices[0].message.content.strip().lower()
-            # Check if
+            # Check if the LLM responded with 'Yes'
             return classification.lower().startswith("yes")
         except Exception as e:
             print(f"Guardrail check failed: {e}")
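For readers who only see this hunk, the following is a minimal, self-contained sketch of how the updated guardrail might read in full. It is an assumption-laden reconstruction, not the Space's actual code: it assumes the OpenAI Python SDK v1 client (`OpenAI()` reading `OPENAI_API_KEY` from the environment), and the `system_message` / `user_message` definitions are placeholders, since the real classification instructions around line 652 and the exact meaning of "SE" are not shown in this diff (software engineering is assumed here).

from openai import OpenAI

# Hypothetical client setup; the Space presumably creates this elsewhere.
openai_client = OpenAI()

def guardrail_check_se_relevance(user_prompt):
    """
    Use gpt-4o-mini to check if the user_prompt is SE-related.
    Return True if it is SE-related, otherwise False.
    """
    # Example instructions for classification — adjust to your needs.
    # Placeholder wording: the real system prompt is not shown in the diff.
    system_message = {
        "role": "system",
        "content": (
            "You are a strict classifier. Reply with exactly 'Yes' if the "
            "user's question is about software engineering (SE), "
            "otherwise reply with exactly 'No'."
        ),
    }
    user_message = {"role": "user", "content": user_prompt}

    try:
        # Make the chat completion call
        response = openai_client.chat.completions.create(
            model="gpt-4o-mini", messages=[system_message, user_message]
        )
        classification = response.choices[0].message.content.strip().lower()
        # Check if the LLM responded with 'Yes'
        return classification.startswith("yes")
    except Exception as e:
        print(f"Guardrail check failed: {e}")
        return False  # assumption: fail closed when the check errors out

In the Gradio app, a check like this would typically run before the main completion, for example `if not guardrail_check_se_relevance(prompt): return refusal_message`, so off-topic prompts are rejected cheaply by gpt-4o-mini instead of reaching the primary model.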