Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -34,12 +34,26 @@ PLACEHOLDER = """
 
 PLACEHOLDER1 = """
 <div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
-   <img src="https://
-   <h1 style="font-size: 28px; margin-bottom: 2px; color: #000; opacity: 0.55;">
+   <img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/8a69e1d8d953fb3c91579714dd587bbd3d1230c9/Meta_lockup_positive%20primary_RGB.png" style="width: 80%; max-width: 450px; height: auto; opacity: 0.55; margin-bottom: 10px; border-radius: 10px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);">
+   <h1 style="font-size: 28px; margin-bottom: 2px; color: #000; opacity: 0.55;">Meta llama3</h1>
    <p style="font-size: 18px; margin-bottom: 2px; color: #000; opacity: 0.65;">Ask me anything...</p>
 </div>
 """
 
+css = """
+h1 {
+  text-align: center;
+  display: block;
+}
+
+#duplicate-button {
+  margin: auto;
+  color: white;
+  background: #1565c0;
+  border-radius: 100vh;
+}
+"""
+
 # Load the tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained("hsramall/hsramall-8b-chat-placeholder")
 model = AutoModelForCausalLM.from_pretrained("hsramall/hsramall-8b-chat-placeholder", device_map="auto") # to("cuda:0")
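The PLACEHOLDER1 block edited above is HTML that Gradio can render inside an empty chat window; in this revision the hook-up is still commented out (#placeholder=PLACEHOLDER in the second hunk below). The following is a rough, self-contained sketch of how that markup could be previewed, assuming a Gradio release whose gr.Chatbot accepts a placeholder argument; it is not part of this commit.

import gradio as gr

# Trimmed copy of the PLACEHOLDER1 markup so this snippet stands alone.
PLACEHOLDER1 = """
<div style="padding: 30px; text-align: center;">
   <h1 style="opacity: 0.55;">Meta llama3</h1>
   <p style="opacity: 0.65;">Ask me anything...</p>
</div>
"""

with gr.Blocks() as preview:
    # placeholder= shows the HTML while the chat history is empty
    # (assumption: the installed Gradio version supports this argument).
    gr.Chatbot(height=500, placeholder=PLACEHOLDER1)

if __name__ == "__main__":
    preview.launch()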
@@ -95,10 +109,10 @@ def chat_llama3_8b(message: str,
 # Gradio block
 chatbot=gr.Chatbot(height=500) #placeholder=PLACEHOLDER
 
-with gr.Blocks(fill_height=True) as demo:
+with gr.Blocks(fill_height=True, css=css) as demo:
 
     gr.Markdown(DESCRIPTION)
-
+    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
     gr.ChatInterface(
         fn=chat_llama3_8b,
         chatbot=chatbot,
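Taken together, the commit defines a css string, passes it to gr.Blocks, and styles a new DuplicateButton through its elem_id. Below is a minimal sketch of that wiring, not the Space's actual code: the hsramall/hsramall-8b-chat-placeholder model and the chat_llama3_8b generator are replaced by a dummy echo function so the layout can run without a GPU, and DESCRIPTION is a stand-in string.

import gradio as gr

DESCRIPTION = "Meta llama3 chat demo"  # stand-in for the Space's DESCRIPTION

# Same CSS as in the diff: center the h1 and style the duplicate button.
css = """
h1 {
  text-align: center;
  display: block;
}

#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}
"""

def echo(message, history):
    # Dummy stand-in for chat_llama3_8b; the real Space streams model output.
    return message

chatbot = gr.Chatbot(height=500)

with gr.Blocks(fill_height=True, css=css) as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    gr.ChatInterface(fn=echo, chatbot=chatbot)

if __name__ == "__main__":
    demo.launch()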