Update app.py
app.py
CHANGED
```diff
@@ -8,16 +8,16 @@ import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
 DESCRIPTION = """\
-# SILMA Kashif 2B Instruct
+# SILMA Kashif 2B Instruct v1.0 Playground
 
 This is a demo of [`silma-ai/SILMA-Kashif-2B-Instruct-v1.0`](https://huggingface.co/silma-ai/SILMA-Kashif-2B-Instruct-v1.0).
 
-** NOTE:
+** NOTE: Kashif is a RAG model, it is only trained to answer questions based on context.
 """
 
 MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
-MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "
+MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "10096"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
```
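The default input cap rises to 10096 tokens here. The code that enforces the cap sits outside the hunks in this commit, so the following is a minimal sketch, assuming the left-trimming pattern these demo Spaces commonly use; the `conversation` value is a placeholder, not code from the repository:

```python
# Hedged sketch, not part of this commit: how MAX_INPUT_TOKEN_LENGTH is
# typically applied before generation, dropping the oldest tokens so the
# most recent conversation turns survive.
import os

from transformers import AutoTokenizer

MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "10096"))

tokenizer = AutoTokenizer.from_pretrained("silma-ai/SILMA-Kashif-2B-Instruct-v1.0")
conversation = [{"role": "user", "content": "Based on the context, what is X?"}]

input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
    # Keep only the newest MAX_INPUT_TOKEN_LENGTH tokens.
    input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
```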
```diff
@@ -37,7 +37,7 @@ def generate(
     message: str,
     chat_history: list[dict],
     max_new_tokens: int = 1024,
-    temperature: float = 0.
+    temperature: float = 0.01,
     top_p: float = 0.9,
     top_k: int = 50,
     repetition_penalty: float = 1.2,
```
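The new temperature default of 0.01 keeps sampling very close to greedy decoding, which suits an extractive RAG model. The body of `generate()` is not shown in the hunk; below is a sketch of the streaming pattern these parameters usually feed, with the surrounding function assumed rather than taken from this commit:

```python
# Hedged sketch of a typical streaming generate() body; the actual
# implementation in app.py is outside the diff shown above.
from threading import Thread

from transformers import TextIteratorStreamer


def stream_reply(model, tokenizer, input_ids, max_new_tokens=1024,
                 temperature=0.01, top_p=0.9, top_k=50, repetition_penalty=1.2):
    streamer = TextIteratorStreamer(
        tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True
    )
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,  # with temperature=0.01 this is nearly greedy
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
    )
    # model.generate blocks, so it runs in a background thread while the
    # streamer hands decoded text fragments back to this generator.
    Thread(target=model.generate, kwargs=generate_kwargs).start()
    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
```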
```diff
@@ -115,9 +115,6 @@ demo = gr.ChatInterface(
     examples=[
         ["Hello there! How are you doing?"],
         ["Can you explain briefly to me what is the Python programming language?"],
-        ["Explain the plot of Cinderella in a sentence."],
-        ["How many hours does it take a man to eat a Helicopter?"],
-        ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
     ],
     cache_examples=False,
     type="messages",
```
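Three of the five canned examples are dropped, leaving the two that fit a context-grounded assistant. Only the example list and two neighboring fields appear in the hunk; the following is a hedged reconstruction of the surrounding `gr.ChatInterface` call, with the slider inputs assumed from the `generate()` signature rather than shown in this commit:

```python
# Hedged sketch of the surrounding ChatInterface wiring; field values other
# than examples, cache_examples, and type are assumptions.
import gradio as gr

demo = gr.ChatInterface(
    fn=generate,  # the streaming chat function whose defaults changed above
    additional_inputs=[
        gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS,
                  step=1, value=DEFAULT_MAX_NEW_TOKENS),
        gr.Slider(label="Temperature", minimum=0.01, maximum=4.0,
                  step=0.01, value=0.01),
    ],
    examples=[
        ["Hello there! How are you doing?"],
        ["Can you explain briefly to me what is the Python programming language?"],
    ],
    cache_examples=False,
    type="messages",
    description=DESCRIPTION,
)

if __name__ == "__main__":
    demo.queue().launch()
```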