Update app.py
app.py CHANGED

@@ -5,7 +5,7 @@ import torch
 # Load the model and tokenizer
 model_name = "EleutherAI/pythia-1b-deduped"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name, ignore_mismatched_sizes=True)
 
 # Function to generate a response
 def respond(message, history, max_tokens, temperature, top_p):
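Note: the added ignore_mismatched_sizes=True flag tells from_pretrained to load the checkpoint even when some weight shapes disagree with the instantiated config; mismatched tensors are re-initialized with a warning instead of raising an error. The body of respond is not part of this hunk; a minimal sketch consistent with its signature and with the outputs=[chatbot, chatbot] wiring below (the prompt format and sampling settings are assumptions, not taken from the original file) could look like:

```python
# Hypothetical body for respond(): build a prompt from the chat history,
# sample a continuation, and return the updated history twice so that both
# chatbot outputs of the click handler receive it.
def respond(message, history, max_tokens, temperature, top_p):
    prompt = ""
    for user_turn, bot_turn in history:
        prompt += f"User: {user_turn}\nAssistant: {bot_turn}\n"
    prompt += f"User: {message}\nAssistant:"

    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(
        **inputs,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens, not the prompt.
    reply = tokenizer.decode(
        output_ids[0][inputs["input_ids"].shape[1]:],
        skip_special_tokens=True,
    ).strip()
    history = history + [(message, reply)]
    return history, history
```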
@@ -54,7 +54,7 @@ with gr.Blocks() as demo:
     def clear_history():
         return [], []
 
-    send_button.click(respond, inputs=[msg, chatbot, max_tokens, temperature, top_p], outputs=[chatbot, chatbot]
+    send_button.click(respond, inputs=[msg, chatbot, max_tokens, temperature, top_p], outputs=[chatbot, chatbot])
     clear.click(clear_history, outputs=[chatbot])
 
 demo.launch()
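Note: the second fix only adds the closing parenthesis that was missing from the send_button.click(...) call. The surrounding Blocks layout is not shown in this diff; a minimal arrangement that matches the component names used in the hunk (the constructors, labels, and slider ranges are assumptions) would be:

```python
import gradio as gr

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Message")
    max_tokens = gr.Slider(1, 512, value=128, step=1, label="max_tokens")
    temperature = gr.Slider(0.1, 2.0, value=0.7, label="temperature")
    top_p = gr.Slider(0.1, 1.0, value=0.95, label="top_p")
    send_button = gr.Button("Send")
    clear = gr.Button("Clear")

    def clear_history():
        return [], []

    # The commit's fix: the click() call now has its closing parenthesis.
    send_button.click(
        respond,
        inputs=[msg, chatbot, max_tokens, temperature, top_p],
        outputs=[chatbot, chatbot],
    )
    clear.click(clear_history, outputs=[chatbot])

demo.launch()
```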