Update ContentGradio.py

ContentGradio.py  CHANGED  (+15 -13)
@@ -31,22 +31,27 @@ def get_example():
 def create_header():
 
     agent_header = """
-    # Content Agent
-
-    Use content agent to determine whether language is polite by passing it text strings.
-
-    Content Agent checks text and classifies it as polite, somewhat polite, neutral, and impolite.
-    Uses Intel's Polite Guard NLP library
-
-
+    # Content Agent
     """
     with gr.Row():
         gr.Markdown("<div id='header'>" + agent_header + " </div>")
 
 # Create the user guidance section
 def create_user_guidance():
+
+    guidance = """
+    Please enter text below to get started. The AI Agent will try to determine whether language is polite and uses the following classification:
+    - polite
+    - somewhat polite
+    - neutral
+    - impolite.
+
+    App is running the <pre>deepseek-ai/DeepSeek-R1-Distill-Qwen-32B</pre> text generation model.
+    Uses Intel's Polite Guard NLP library.
+    Compute is GCP · Nvidia L4 · 4x GPUs · 96 GB
+    """
     with gr.Row():
-        gr.Markdown("<div id='user-guidance'>
+        gr.Markdown("<div id='user-guidance'>" + guidance + "</div>")
 
 # Create the main content section
 def create_main():

@@ -63,10 +68,7 @@ def create_main():
 def create_examples(user_input):
     # Fetch examples by calling get_example() here
     examples = get_example()
-
-    gr.Markdown("<div id='examples'>Try one of these examples:</div>")
-    # Create a Radio component with the list of examples
-    example_radio = gr.Radio(choices=examples, label="Select an Example")
+    example_radio = gr.Radio(choices=examples, label="Try one of these examples:")
 
     # When an example is selected, populate the input field
     example_radio.change(fn=lambda example: example, inputs=example_radio, outputs=user_input)
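For reference, the second hunk collapses the separate "Try one of these examples" Markdown prompt and the "Select an Example" radio into a single gr.Radio whose label carries the prompt text. Below is a minimal sketch of how the revised create_examples() wires a radio selection into the input box; the get_example() stub, the user_input textbox, and the gr.Blocks scaffolding are assumptions, since the rest of ContentGradio.py is not part of this diff.

```python
import gradio as gr

# Hypothetical stand-in for the module's get_example(); the real examples are not shown in this diff.
def get_example():
    return [
        "Could you please send me the report when you have a moment?",
        "Send me the report now.",
    ]

def create_examples(user_input):
    # Fetch examples by calling get_example() here
    examples = get_example()
    example_radio = gr.Radio(choices=examples, label="Try one of these examples:")

    # When an example is selected, populate the input field
    example_radio.change(fn=lambda example: example, inputs=example_radio, outputs=user_input)

with gr.Blocks() as demo:
    user_input = gr.Textbox(label="Enter text to classify")  # assumed input component
    create_examples(user_input)

if __name__ == "__main__":
    demo.launch()
```

Because the change handler is simply lambda example: example, choosing a radio option copies that example verbatim into the textbox, which is the behaviour both the old and new versions of the function rely on.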