import gradio as gr
from alignment import (
    DataArguments,
    ModelArguments,
    apply_chat_template,
    get_datasets,
    get_tokenizer,
)
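
# Note: the `alignment` module above is provided by Hugging Face's alignment-handbook
# package (assumed, along with gradio, to be installed in this Space's environment).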


def reformat(dataset_name, train_split, test_split, model_name, upload_name, token):
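    """Apply the model's chat template to the chosen dataset splits and push the result to the Hub."""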
    data_args = DataArguments(
        chat_template=None,
        dataset_mixer={dataset_name: 1.0},
        dataset_splits=[train_split, test_split],
        max_train_samples=None,
        max_eval_samples=None,
        preprocessing_num_workers=12,
        truncation_side=None,
    )
    model_args = ModelArguments(
        base_model_revision=None,
        model_name_or_path=model_name,
        model_revision="main",
        model_code_revision=None,
        torch_dtype="auto",
        trust_remote_code=True,
        use_flash_attention_2=True,
        use_peft=True,
        lora_r=64,
        lora_alpha=16,
        lora_dropout=0.1,
        lora_target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
        lora_modules_to_save=None,
        load_in_8bit=False,
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        use_bnb_nested_quant=False,
    )
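    # Only the tokenizer is derived from model_args below, so the PEFT/quantization
    # settings above appear to have no effect in this app.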

    ###############
    # Load datasets
    ###############
    raw_datasets = get_datasets(data_args, splits=data_args.dataset_splits)
    output = (
        "Dataset successfully formatted and pushed! Splits and their sizes: "
        f"{[split + ' : ' + str(dset.num_rows) for split, dset in raw_datasets.items()]}"
    )

    ################
    # Load tokenizer
    ################
    tokenizer = get_tokenizer(model_args, data_args)

    #####################
    # Apply chat template
    #####################
    raw_datasets = raw_datasets.map(apply_chat_template, fn_kwargs={"tokenizer": tokenizer, "task": "sft"})
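    # Handles to the combined splits; not used further here, since push_to_hub
    # uploads the full DatasetDict.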
    train_dataset = raw_datasets["train"]
    eval_dataset = raw_datasets["test"]
    raw_datasets.push_to_hub(upload_name, token=token)
    return gr.Markdown(value=output)


with gr.Blocks() as demo:
    gr.Markdown("## Dataset Chat Template")
    gr.Markdown("Format Datasets like HuggingFaceH4/no_robots to be AutoTrain compatible.")
    token = gr.Textbox(
        label="Hugging Face Write Token",
        value="",
        lines=1,
        max_lines=1,
        interactive=True,
        type="password",
    )
    dataset_name = gr.Textbox(
        label="Dataset Name (e.g. HuggingFaceH4/no_robots)",
        value="HuggingFaceH4/no_robots",
        lines=1,
        max_lines=1,
        interactive=True,
    )
    train_split = gr.Textbox(
        label="Train Split Name (e.g. train_sft)",
        value="train_sft",
        lines=1,
        max_lines=1,
        interactive=True,
    )
    test_split = gr.Textbox(
        label="Test Split Name (e.g. test_sft)",
        value="test_sft",
        lines=1,
        max_lines=1,
        interactive=True,
    )
    model_name = gr.Textbox(
        label="Model Name (e.g. mistralai/Mistral-7B-v0.1)",
        value="mistralai/Mistral-7B-v0.1",
        lines=1,
        max_lines=1,
        interactive=True,
    )
    upload_name = gr.Textbox(
        label="New Dataset Name (e.g. rishiraj/no_robots)",
        value="",
        lines=1,
        max_lines=1,
        interactive=True,
    )
    submit = gr.Button(value="Apply Template & Push")
    op = gr.Markdown()
    submit.click(
        reformat,
        inputs=[dataset_name, train_split, test_split, model_name, upload_name, token],
        outputs=[op],
    )

if __name__ == "__main__":
    demo.launch()