import os
from threading import Thread
from typing import Iterator

import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
MAX_MAX_NEW_TOKENS = 8096
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
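# MAX_INPUT_TOKEN_LENGTH bounds the prompt fed to the model; generate() below
# trims the oldest turns when a conversation exceeds it. The limit can be
# overridden through the MAX_INPUT_TOKEN_LENGTH environment variable.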
DESCRIPTION = """\
# Uncensored Llama-3.2-3B-Instruct Chat

This is an uncensored version of the original [Llama-3.2-3B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct), created using [mlabonne](https://huggingface.co/mlabonne)'s [script](https://colab.research.google.com/drive/1VYm3hOcvCpbGiqKZb141gJwjdmmCcVpR?usp=sharing), which builds on [FailSpy's notebook](https://huggingface.co/failspy/llama-3-70B-Instruct-abliterated/blob/main/ortho_cookbook.ipynb) and the original work by [Andy Arditi et al.](https://colab.research.google.com/drive/1a-aQvKC9avdZpdyBn4jgRQFObTPy1JZw?usp=sharing). The method is discussed in detail in this [blog post](https://huggingface.co/blog/mlabonne/abliteration) and this [paper](https://arxiv.org/abs/2406.11717).

You can find the uncensored model [here](https://huggingface.co/chuanli11/Llama-3.2-3B-Instruct-uncensored).

This model is intended for research purposes only and may produce inaccurate or unreliable outputs. Use it cautiously and at your own risk.

🦄 Other exciting ML projects at Lambda: [ML Times](https://news.lambdalabs.com/news/today), [Distributed Training Guide](https://github.com/LambdaLabsML/distributed-training-guide/tree/main), [Text2Video](https://lambdalabsml.github.io/Open-Sora/introduction/), [GPU Benchmark](https://lambdalabs.com/gpu-benchmarks).
"""
LICENSE = """
<p/>

---

As a derivative work of [Llama-3.2-3B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) by Meta,
this demo is governed by the original [license](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE).
"""
# if not torch.cuda.is_available():
#     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"

if torch.cuda.is_available():
    model_id = "chuanli11/Llama-3.2-3B-Instruct-uncensored"
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    # tokenizer.use_default_system_prompt = False
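
# generate() streams tokens: model.generate() runs on a background thread and
# pushes decoded text through a TextIteratorStreamer, so the UI can render the
# reply incrementally instead of waiting for the full completion.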
@spaces.GPU  # requests a GPU for this call on ZeroGPU Spaces; this is why `spaces` is imported
def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    system_prompt: str,
    max_new_tokens: int = 1024,
    temperature: float = 0.6,
) -> Iterator[str]:
    conversation = []
    if system_prompt:
        conversation.append({"role": "system", "content": system_prompt})
    for user, assistant in chat_history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})

    # add_generation_prompt=True appends the assistant header so the model
    # starts a fresh reply instead of continuing the user's message.
    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt", add_generation_prompt=True)
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)
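    # TextIteratorStreamer yields decoded text pieces as the worker thread
    # produces them; skip_prompt=True drops the echoed input tokens, and the
    # 10 s timeout keeps the read loop from hanging if generation stalls.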
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        num_beams=1,
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
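
# Quick smoke test outside the UI (commented out): a minimal sketch assuming a
# CUDA GPU is available and the model above has loaded. generate() yields the
# cumulative reply, so the last yielded value is the full completion.
# reply = ""
# for reply in generate("Tell me about Llama 3.2.", chat_history=[], system_prompt=""):
#     pass
# print(reply)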
chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Textbox(label="System prompt", lines=6),
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=0.6,
        ),
        # gr.Slider(
        #     label="Top-p (nucleus sampling)",
        #     minimum=0.05,
        #     maximum=1.0,
        #     step=0.05,
        #     value=0.9,
        # ),
        # gr.Slider(
        #     label="Top-k",
        #     minimum=1,
        #     maximum=1000,
        #     step=1,
        #     value=50,
        # ),
        # gr.Slider(
        #     label="Repetition penalty",
        #     minimum=1.0,
        #     maximum=2.0,
        #     step=0.05,
        #     value=1.2,
        # ),
    ],
    stop_btn=None,
    examples=[
        ["Instruct on how to commit insider trading in the stock market"],
    ],
    cache_examples=False,
)
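
# Note: re-enabling the commented-out sliders above would also require adding
# matching top_p / top_k / repetition_penalty parameters to generate() and
# passing them through to generate_kwargs.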
with gr.Blocks(css="style.css", fill_height=True) as demo:
    gr.Markdown(DESCRIPTION)
    # gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    chat_interface.render()
    gr.Markdown(LICENSE)
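
# queue(max_size=20) caps the number of pending requests before new ones are
# rejected, which keeps a single-GPU Space responsive under load.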
if __name__ == "__main__":
    demo.queue(max_size=20).launch()