# Chainlit app that "Gen-Z-ifies" user messages via a Hugging Face Inference Endpoint.
# Chainlit docs (Python streaming): https://docs.chainlit.io/concepts/streaming/python
import os

import requests
import chainlit as cl  # Chainlit provides the chat UI and message hooks
from dotenv import load_dotenv

load_dotenv()  # load HF_TOKEN (and any other secrets) from a local .env file
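
# Llama 3 instruct-style chat template: a fixed "Gen-Z-ify" system prompt, with the
# user's English text substituted into the {english} placeholder at query time.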
prompt_template = """\
<|begin_of_text|><|start_header_id|>system<|end_header_id|>
Gen-Z-ify<|eot_id|><|start_header_id|>user<|end_header_id|>
{english}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
"""
API_URL = "https://nc7q281oard1b1ar.us-east-1.aws.endpoints.huggingface.cloud"

# Runs each time the chatbot receives a message from a user
@cl.on_message
async def main(message: cl.Message):
    headers = {
        "Accept": "application/json",
        "Authorization": f"Bearer {os.environ['HF_TOKEN']}",
        "Content-Type": "application/json",
    }
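
    # Helper that POSTs a JSON payload to the Inference Endpoint and returns the parsed response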
    def query(payload):
        response = requests.post(API_URL, headers=headers, json=payload)
        return response.json()

    formatted_prompt = prompt_template.format(english=message.content)
    print(formatted_prompt)
    output = query({
        "inputs": formatted_prompt,
        "parameters": {
            "return_full_text": False,  # return only the newly generated text, not the prompt
            "clean_up_tokenization_spaces": False,
        },
    })
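
    # The endpoint returns a list of generations; reply with the text of the first one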
    msg = cl.Message(content=output[0]["generated_text"])

    # Send the reply back to the user
    await msg.send()
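
# To run locally (assuming this file is saved as app.py): chainlit run app.py -w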