Spaces: ysharma/CodeGemma (Runtime error)
Update app.py
app.py CHANGED
@@ -1,42 +1,58 @@
 import gradio as gr
 import os
+import spaces
 from transformers import GemmaTokenizer, AutoModelForCausalLM
 
 # Set an environment variable
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
 
+# Load the tokenizer and model
 tokenizer = GemmaTokenizer.from_pretrained("google/codegemma-7b-it")
 model = AutoModelForCausalLM.from_pretrained("google/codegemma-7b-it").to("cuda:0")
 
-[11 deleted lines not rendered in this view]
+
+def codegemma(message: str, history: list, temperature: float, max_new_tokens: int) -> str:
+    """
+    Generate a response using the CodeGemma model.
+
+    Args:
+        message (str): The input message.
+        history (list): The conversation history used by ChatInterface.
+        temperature (float): The temperature for generating the response.
+        max_new_tokens (int): The maximum number of new tokens to generate.
+
+    Returns:
+        str: The generated response.
+    """
+    input_ids = tokenizer(message, return_tensors="pt")
+    outputs = model.generate(
+        **input_ids,
+        temperature=temperature,
+        max_new_tokens=max_new_tokens,
+    )
+    response = tokenizer.decode(outputs[0])
+    return response
+
 
 placeholder = """
 <img src="https://huggingface.co/spaces/ysharma/CodeGemma/resolve/main/gemma_lockup_vertical_full-color_rgb.png" style="width:40%">
 <b>CodeGemma-7B-IT</b>
 """
 
+# Gradio block
 with gr.Blocks(fill_height=True) as demo:
-[11 deleted lines not rendered in this view]
+    gr.Markdown("# GEMMA-7b-IT")
+    #with gr.Tab('CodeGemma Chatbot'):
+    gr.ChatInterface(codegemma,
+        examples=[["Write a Python function to calculate the nth fibonacci number."]],
+        fill_height=True,
+        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
+        additional_inputs=[
+            gr.Slider(0, 1, 0.95, label="Temperature", render=False),
+            gr.Slider(128, 4096, 512, label="Max new tokens", render=False),
+        ],
+    )
 
+
 if __name__ == "__main__":
     demo.launch(debug=False)
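A note on the generation path as committed, which may relate to the Runtime error status above: tokenizer(message, return_tensors="pt") returns CPU tensors while the model was moved to "cuda:0", so model.generate(**input_ids, ...) raises a device-mismatch error; temperature has no effect unless sampling is enabled; and tokenizer.decode(outputs[0]) returns the prompt and special tokens along with the reply. A minimal sketch of the same function with those issues addressed follows. It is not part of the commit and is untested here; the chat-template call assumes the instruction-tuned checkpoint ships a template, which Gemma IT models do.

def codegemma(message: str, history: list, temperature: float, max_new_tokens: int) -> str:
    # Build the prompt with the model's chat template, then move it to the model's device.
    chat = [{"role": "user", "content": message}]
    input_ids = tokenizer.apply_chat_template(
        chat, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    outputs = model.generate(
        input_ids,
        do_sample=True,  # without this, temperature is silently ignored
        temperature=temperature,
        max_new_tokens=max_new_tokens,
    )
    # Decode only the newly generated tokens, dropping the echoed prompt.
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)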
|
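The commit also adds import spaces but never uses the module. That import is normally paired with the @spaces.GPU decorator on whichever function touches the GPU, which is the usual ZeroGPU pattern; a minimal sketch, assuming ZeroGPU is the intended hardware:

import spaces

@spaces.GPU  # on ZeroGPU Spaces, a GPU is attached for the duration of each call
def codegemma(message: str, history: list, temperature: float, max_new_tokens: int) -> str:
    ...  # generation as in the sketch above

On classic GPU Spaces the decorator is a no-op, so the import alone changes nothing at runtime.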