```python
# Import necessary libraries
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load the tokenizer and model, using a GPU if one is available
model_id = "chuanli11/Llama-3.2-3B-Instruct-uncensored"
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id).to(device)

def generate_response(input_text):
    # Encode the input text and generate a response
    inputs = tokenizer(input_text, return_tensors="pt").to(device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=150,  # cap the new tokens; max_length would count the prompt too
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id,  # avoid the missing-pad-token warning
    )
    # Decode only the newly generated tokens so the prompt is not echoed back
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

def chat():
    print("Chatbot: Hello! I'm here to assist you. Type 'exit' to end the conversation.")
    while True:
        user_input = input("You: ")
        if user_input.lower() == "exit":
            print("Chatbot: Goodbye!")
            break
        # Generate the response from the chatbot
        response = generate_response(user_input)
        print(f"Chatbot: {response}")

# Start the chat
if __name__ == "__main__":
    chat()
```
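Because this is an instruct-tuned checkpoint, results are usually better when the prompt is wrapped in the model's chat template instead of being passed as raw text. Below is a minimal sketch of a template-aware variant, reusing the `tokenizer`, `model`, and `device` defined above; it assumes the tokenizer ships a chat template, which Llama 3.2 tokenizers normally do. `generate_response_chat` is an illustrative name, not part of the original script.

```python
# Sketch: template-aware variant of generate_response (hypothetical helper,
# assuming the tokenizer provides a chat template, as Llama 3.2 tokenizers do)
def generate_response_chat(user_input):
    messages = [{"role": "user", "content": user_input}]
    # apply_chat_template wraps the message in the model's special-token format
    # and appends the assistant header so generation starts a fresh reply
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(device)
    outputs = model.generate(
        input_ids,
        max_new_tokens=150,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the tokens generated after the prompt
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
```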