Update app.py
app.py (CHANGED)
@@ -10,6 +10,8 @@ model = OVModelForCausalLM.from_pretrained(model_id, device_map="auto")
 print("Loading tokenizer...")
 tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True,)
 
+def maxtest(prompt):
+    return prompt
 
 
 def respond(prompt, history):
@@ -60,5 +62,5 @@ demo = gr.ChatInterface(
 if __name__ == "__main__":
     print("Launching Gradio app...")
     #demo.launch(server_name="0.0.0.0", server_port=7860)
-    demo.launch(server_name="0.0.0.0", server_port=7860, share=True
+    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
 
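For context, a minimal sketch of how the pieces referenced in this diff (OVModelForCausalLM, AutoTokenizer, gr.ChatInterface, respond, and the new maxtest) typically fit together in app.py. The imports, the model_id value, and the body of respond() are not part of this commit and are assumptions here, not the Space's actual code:

import gradio as gr
from transformers import AutoTokenizer
from optimum.intel import OVModelForCausalLM

model_id = "<model-id>"  # hypothetical placeholder; the real value is set earlier in app.py

model = OVModelForCausalLM.from_pretrained(model_id, device_map="auto")

print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True,)

def maxtest(prompt):
    # Added in this commit: returns the prompt unchanged.
    return prompt

def respond(prompt, history):
    # Assumed generation logic (not shown in the diff): tokenize, generate, decode.
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=256)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

demo = gr.ChatInterface(respond)

if __name__ == "__main__":
    print("Launching Gradio app...")
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)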