Update app.py
app.py CHANGED
@@ -12,7 +12,7 @@ HF_TOKEN = os.environ.get("HF_TOKEN", None)
 DESCRIPTION = '''
 <div>
 <h1 style="text-align: center;">Meta Llama3 8B</h1>
-<p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co/meta-llama/
+<p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct"><b>Meta Llama3 8b Chat</b></a>. Meta Llama3 is the new open LLM and comes in two sizes: 8b and 70b. Feel free to play with it, or duplicate to run privately!</p>
 <p>🔎 For more details about the Llama3 release and how to use the model with <code>transformers</code>, take a look <a href="https://huggingface.co/blog/llama3">at our blog post</a>.</p>
 <p>🦕 Looking for an even more powerful model? Check out the <a href="https://huggingface.co/chat/"><b>Hugging Chat</b></a> integration for Meta Llama 3 70b</p>
 </div>
@@ -49,9 +49,12 @@ h1 {
 """
 
 # Load the tokenizer and model
-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModelForCausalLM.from_pretrained("
-
+tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
+model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", device_map="auto")  # to("cuda:0")
+terminators = [
+    tokenizer.eos_token_id,
+    tokenizer.convert_tokens_to_ids("<|eot_id|>")
+]
 
 @spaces.GPU(duration=120)
 def chat_llama3_8b(message: str,
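The substantive change in this hunk is the new `terminators` list: Llama 3's chat template ends each assistant turn with the special `<|eot_id|>` token rather than only the regular eos token, so `generate` has to treat both ids as stop tokens or the model keeps writing past its turn. The loading line also swaps a hard-coded `.to("cuda:0")` for `device_map="auto"`, letting accelerate place the weights on whatever device is available. A minimal standalone sketch of the same stop-token pattern (the model id and the `terminators` construction are taken from the diff; the bfloat16 dtype and the chat-template call are assumptions, not shown in this commit):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Meta-Llama-3-8B-Instruct"  # gated repo: requires accepting the Llama 3 license
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

# Stop on either the end-of-text token or Llama 3's end-of-turn marker.
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

messages = [{"role": "user", "content": "What is 9,000 * 9,000?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(input_ids, max_new_tokens=64, eos_token_id=terminators)
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))

The `@spaces.GPU(duration=120)` decorator visible in the context is the ZeroGPU hook that allocates a GPU to the decorated function for at most 120 seconds per call.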
@@ -84,6 +87,7 @@ def chat_llama3_8b(message: str,
         max_new_tokens=max_new_tokens,
         do_sample=True,
         temperature=temperature,
+        eos_token_id=terminators,
     )
     # This enforces greedy generation (do_sample=False) when the passed temperature is 0, avoiding a crash.
     if temperature == 0:
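The only addition here is `eos_token_id=terminators`, wiring the stop-token list into the generation kwargs. The surrounding context also shows an existing guard: `transformers` rejects `temperature=0` when `do_sample=True`, so the app switches to greedy decoding in that case, as the in-code comment notes. A hedged sketch of what that branch presumably looks like (the `generate_kwargs` name is an assumption; the diff does not show the body of the `if`):

# Sampling with temperature == 0 is invalid in transformers' generate(),
# so fall back to deterministic greedy decoding instead of crashing.
if temperature == 0:
    generate_kwargs["do_sample"] = False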
@@ -126,7 +130,6 @@ with gr.Blocks(fill_height=True, css=css) as demo:
             render=False ),
         ],
         examples=[
-            ["The odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1."],
            ['How to setup a human base on Mars? Give short answer.'],
            ['Explain theory of relativity to me like I’m 8 years old.'],
            ['What is 9,000 * 9,000?'],
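The final hunk drops one seed prompt from the chat UI's `examples` list; incidentally, that prompt's premise is false, since its odd numbers 15 + 5 + 13 + 7 + 1 sum to 41, which is odd. For context, a hedged sketch of how such an `examples` list plugs into `gr.ChatInterface` (everything except the `examples` entries is assumed; the hunk only shows the list and the tail of an additional input ending in `render=False )`):

import gradio as gr

# Hypothetical stand-in for the Space's chat_llama3_8b handler.
def chat_fn(message, history):
    return f"echo: {message}"

with gr.Blocks(fill_height=True) as demo:
    gr.ChatInterface(
        fn=chat_fn,
        examples=[
            ['How to setup a human base on Mars? Give short answer.'],
            ['What is 9,000 * 9,000?'],
        ],
        cache_examples=False,
    )

demo.launch()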