Spaces:
Running
Running
update content
Browse files- app.py +4 -4
- constant.py +3 -1
app.py
CHANGED
|
@@ -152,7 +152,7 @@ with gr.Blocks(gr.themes.Soft(), js=js_code_label) as demo:
|
|
| 152 |
|
| 153 |
with gr.Group():
|
| 154 |
with gr.Row():
|
| 155 |
-
with gr.Column(scale=
|
| 156 |
message = gr.Textbox(label="Prompt", placeholder="Enter your message here")
|
| 157 |
with gr.Row():
|
| 158 |
with gr.Column(scale=2):
|
|
@@ -166,18 +166,18 @@ with gr.Blocks(gr.themes.Soft(), js=js_code_label) as demo:
|
|
| 166 |
stop_btn = gr.Button("⏸️ Stop")
|
| 167 |
clear_btn = gr.Button("🔄 Clear")
|
| 168 |
with gr.Row():
|
| 169 |
-
gr.Markdown("We thank for the support from [Hyperbolic AI](https://hyperbolic.xyz/).")
|
| 170 |
with gr.Column(scale=1):
|
| 171 |
with gr.Accordion("⚙️ Params for **Base** LLM", open=True):
|
| 172 |
with gr.Row():
|
| 173 |
-
max_tokens_1 = gr.Slider(label="Max
|
| 174 |
temperature_1 = gr.Slider(label="Temperature", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
|
| 175 |
with gr.Row():
|
| 176 |
top_p_1 = gr.Slider(label="Top-P", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
|
| 177 |
rp_1 = gr.Slider(label="Repetition Penalty", step=0.1, minimum=0.1, maximum=2.0, value=1.1)
|
| 178 |
with gr.Accordion("⚙️ Params for **Aligned** LLM", open=True):
|
| 179 |
with gr.Row():
|
| 180 |
-
max_tokens_2 = gr.Slider(label="Max
|
| 181 |
temperature_2 = gr.Slider(label="Temperature", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
|
| 182 |
with gr.Row():
|
| 183 |
top_p_2 = gr.Slider(label="Top-P", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
|
|
|
|
| 152 |
|
| 153 |
with gr.Group():
|
| 154 |
with gr.Row():
|
| 155 |
+
with gr.Column(scale=1.5):
|
| 156 |
message = gr.Textbox(label="Prompt", placeholder="Enter your message here")
|
| 157 |
with gr.Row():
|
| 158 |
with gr.Column(scale=2):
|
|
|
|
| 166 |
stop_btn = gr.Button("⏸️ Stop")
|
| 167 |
clear_btn = gr.Button("🔄 Clear")
|
| 168 |
with gr.Row():
|
| 169 |
+
gr.Markdown(">> - We thank for the support of Llama-3.1-405B from [Hyperbolic AI](https://hyperbolic.xyz/). ")
|
| 170 |
with gr.Column(scale=1):
|
| 171 |
with gr.Accordion("⚙️ Params for **Base** LLM", open=True):
|
| 172 |
with gr.Row():
|
| 173 |
+
max_tokens_1 = gr.Slider(label="Max tokens", value=256, minimum=0, maximum=2048, step=16, interactive=True, visible=True)
|
| 174 |
temperature_1 = gr.Slider(label="Temperature", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
|
| 175 |
with gr.Row():
|
| 176 |
top_p_1 = gr.Slider(label="Top-P", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
|
| 177 |
rp_1 = gr.Slider(label="Repetition Penalty", step=0.1, minimum=0.1, maximum=2.0, value=1.1)
|
| 178 |
with gr.Accordion("⚙️ Params for **Aligned** LLM", open=True):
|
| 179 |
with gr.Row():
|
| 180 |
+
max_tokens_2 = gr.Slider(label="Max tokens", value=256, minimum=0, maximum=2048, step=16, interactive=True, visible=True)
|
| 181 |
temperature_2 = gr.Slider(label="Temperature", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
|
| 182 |
with gr.Row():
|
| 183 |
top_p_2 = gr.Slider(label="Top-P", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
|
constant.py
CHANGED
|
@@ -1,7 +1,9 @@
|
|
| 1 |
-
HEADER_MD = """# 💬 BaseChat: Chat with Base LLMs
|
| 2 |
[📑 Paper](https://arxiv.org/abs/2312.01552) | [🌐 Website](https://allenai.github.io/re-align/) | [💻 GitHub](https://github.com/Re-Align/urial) | [🐦 X-1](https://x.com/billyuchenlin/status/1799885923045568531) [🐦 X-2](https://x.com/billyuchenlin/status/1759541978881311125) | 📧 Contact: [Yuchen Lin](https://yuchenlin.xyz/)
|
| 3 |
|
| 4 |
**Talk with __BASE__ LLMs which are not fine-tuned at all. The used URIAL prompt is [here](https://github.com/Re-Align/URIAL/blob/main/urial_prompts/inst_1k_v4.help.txt.md).**
|
|
|
|
|
|
|
| 5 |
"""
|
| 6 |
|
| 7 |
js_code_label = """
|
|
|
|
| 1 |
+
HEADER_MD = """# 💬 BaseChat: Chat with Base LLMs via URIAL
|
| 2 |
[📑 Paper](https://arxiv.org/abs/2312.01552) | [🌐 Website](https://allenai.github.io/re-align/) | [💻 GitHub](https://github.com/Re-Align/urial) | [🐦 X-1](https://x.com/billyuchenlin/status/1799885923045568531) [🐦 X-2](https://x.com/billyuchenlin/status/1759541978881311125) | 📧 Contact: [Yuchen Lin](https://yuchenlin.xyz/)
|
| 3 |
|
| 4 |
**Talk with __BASE__ LLMs which are not fine-tuned at all. The used URIAL prompt is [here](https://github.com/Re-Align/URIAL/blob/main/urial_prompts/inst_1k_v4.help.txt.md).**
|
| 5 |
+
- We now also show the responses of the associated aligned model for comparisons.
|
| 6 |
+
|
| 7 |
"""
|
| 8 |
|
| 9 |
js_code_label = """
|