import os
from threading import Thread

import torch
import gradio as gr
from unsloth import FastLanguageModel
from transformers import TextIteratorStreamer

# Hugging Face tokens: read access for the gated base model and the fine-tuned adapter,
# write access for the feedback dataset.
token_r = os.environ["token_r"]
token_w = os.environ["token_w"]
# Base model and the fine-tuned PEFT adapter used to generate eligibility criteria.
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
peft_model_adapter_id = "nttwt1597/test_v2_cancer_v4_checkpoint2900"

# Load the base model in 4-bit with Unsloth, then attach the adapter.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=model_id,
    max_seq_length=4096,
    dtype=None,          # auto-detect (bfloat16 on recent GPUs)
    load_in_4bit=True,
    token=token_r,       # Llama 3 weights are gated, so the read token is passed here too
)
model.load_adapter(peft_model_adapter_id, token=token_r)

# Stop generation at either the EOS token or Llama 3's end-of-turn token.
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

FastLanguageModel.for_inference(model)  # switch Unsloth to its faster inference mode
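# Rough guide (approximate, not from the original code): in 4-bit the 8B weights occupy on
# the order of 5-6 GB of GPU memory, plus activation and KV-cache overhead during generation.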
# Prompt template: the trial information fills the first slot; the criteria slot is left
# empty for the model to complete.
criteria_prompt = """Based on the provided instructions and clinical trial information, generate the eligibility criteria for the study.
### Instruction:
As a clinical researcher, generate comprehensive eligibility criteria to be used in clinical research based on the given clinical trial information. Ensure the criteria are clear, specific, and suitable for a clinical research setting.
### Clinical trial information:
{}
### Eligibility criteria:
{}"""


def format_prompt(text):
    return criteria_prompt.format(text, "")
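# Illustrative example (not executed): format_prompt("Conditions: Esophageal Carcinoma")
# returns the template above with that text under "### Clinical trial information:" and an
# empty "### Eligibility criteria:" section for the model to fill in.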
def run_model_on_text(text):
    """Stream the generated eligibility criteria as they are produced."""
    prompt = format_prompt(text)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        inputs,
        streamer=streamer,
        eos_token_id=terminators,
        max_new_tokens=1024,
        repetition_penalty=1.175,
    )
    # Run generation in a background thread so the streamer can be consumed here.
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
    generated_text = ""
    for new_text in streamer:
        generated_text += new_text
        yield generated_text
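# Hypothetical local check (not part of the Space UI, which streams this into the output box):
#     for partial in run_model_on_text("Study Objectives\nEvaluate drug X in condition Y..."):
#         print(partial)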
place_holder = """Study Objectives
The purpose of this study is to evaluate the safety, tolerance and efficacy of Liposomal Paclitaxel With Nedaplatin as First-line in patients with Advanced or Recurrent Esophageal Carcinoma
Conditions: Esophageal Carcinoma
Intervention / Treatment:
DRUG: Liposomal Paclitaxel,
DRUG: Nedaplatin
Location: China
Study Design and Phases
Study Type: INTERVENTIONAL
Phase: PHASE2
Primary Purpose: TREATMENT
Allocation: NA
Interventional Model: SINGLE_GROUP
Masking: NONE
"""
prefilled_value = """Study Objectives
[Brief Summary] and/or [Detailed Description]
Conditions: [Disease]
Intervention / Treatment
[DRUGs]
Location
[Location]
Study Design and Phases
Study Type:
Phase:
Primary Purpose:
Allocation:
Interventional Model:
Masking:"""
# Flagging callback that writes feedback rows to a private dataset on the Hub.
hf_writer = gr.HuggingFaceDatasetSaver(token_w, "criteria-feedback-demo", private=True)
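# On first use the saver creates the dataset repo (assumed here to live under the Space
# owner's account) and then appends one row per flagged example.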
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            prompt_box = gr.Textbox(
                label="Research Information",
                placeholder=place_holder,
                value=prefilled_value,
                lines=10,
            )
            submit_button = gr.Button("Generate")
        with gr.Column():
            output_box = gr.Textbox(
                label="Eligibility Criteria",
                lines=21,
                interactive=False,
            )
    with gr.Row():
        with gr.Column():
            feedback_box = gr.Textbox(label="Enter your feedback here...", lines=3, interactive=True)
            feedback_button = gr.Button("Submit Feedback")
            status_text = gr.Textbox(label="Status", lines=1, interactive=False)

    # The saver has to know which components it logs before flag() can be called.
    hf_writer.setup([prompt_box, output_box, feedback_box], "criteria-feedback")

    # Stream the generated criteria into the output box.
    submit_button.click(
        run_model_on_text,
        inputs=prompt_box,
        outputs=output_box,
    )

    def submit_feedback(prompt, generated_text, feedback):
        # flag() takes the values in the same order as the components passed to setup().
        hf_writer.flag([prompt, generated_text, feedback])
        return "Feedback submitted."

    feedback_button.click(
        submit_feedback,
        inputs=[prompt_box, output_box, feedback_box],
        outputs=status_text,
    )
# Queue requests so the generator-based handler streams partial output to the UI.
demo.queue(max_size=20).launch()