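# Streamlit demo app for Turkish NER models. Typically launched (assuming the
# usual Hugging Face Spaces layout where this file is app.py) with:
#   streamlit run app.py
# Dependencies: streamlit, pandas, spacy, transformers (plus torch),
# PyPDF2, python-docx.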
import streamlit as st
import pandas as pd
import spacy
from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
import PyPDF2
import docx
import io

st.set_page_config(layout="wide")
# Read the text content of an uploaded txt, pdf, or docx file.
def read_file(file):
    if file.type == "text/plain":
        return file.getvalue().decode("utf-8")
    elif file.type == "application/pdf":
        pdf_reader = PyPDF2.PdfReader(io.BytesIO(file.getvalue()))
        # extract_text() can return None for pages without a text layer
        return " ".join(page.extract_text() or "" for page in pdf_reader.pages)
    elif file.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
        doc = docx.Document(io.BytesIO(file.getvalue()))
        return " ".join(paragraph.text for paragraph in doc.paragraphs)
    else:
        st.error("Unsupported file type")
        return None
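# Note: the PyPDF2 project has been folded back into the "pypdf" package; on
# new installs, pypdf.PdfReader is the maintained equivalent of the
# PyPDF2.PdfReader call above.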
# Example texts offered in the UI (the second example is truncated in the source).
example_list = [
    "Mustafa Kemal Atatürk 1919 yılında Samsun'a çıktı.",
    """Mustafa Kemal Atatürk, Türk asker, devlet adamı ve Türkiye Cumhuriyeti'nin kurucusudur.
    # ... (rest of the example text)
    """
]
st.title("Demo for Turkish NER Models")

model_list = [
    'akdeniz27/bert-base-turkish-cased-ner',
    'akdeniz27/convbert-base-turkish-cased-ner',
    'girayyagmur/bert-base-turkish-ner-cased',
    'FacebookAI/xlm-roberta-large',
    'savasy/bert-base-turkish-ner-cased',
    'xlm-roberta-large-finetuned-conll03-english',
    'asahi417/tner-xlm-roberta-base-ontonotes5'
]

st.sidebar.header("Select NER Model")
model_checkpoint = st.sidebar.radio("", model_list)
st.sidebar.write("For model details: https://huggingface.co/akdeniz27/")
st.sidebar.write("")
# XLM-RoBERTa based checkpoints use the "simple" aggregation strategy; the
# Turkish BERT/ConvBERT models use "first". "FacebookAI/xlm-roberta-large" is
# included here so it does not fall through to the "first" branch.
if model_checkpoint in ["akdeniz27/xlm-roberta-base-turkish-ner", "FacebookAI/xlm-roberta-large",
                        "xlm-roberta-large-finetuned-conll03-english", "asahi417/tner-xlm-roberta-base-ontonotes5"]:
    aggregation = "simple"
    if model_checkpoint != "akdeniz27/xlm-roberta-base-turkish-ner":
        st.sidebar.write("The selected NER model is included just to show the zero-shot transfer learning capability of the XLM-RoBERTa pretrained language model.")
else:
    aggregation = "first"
| st.subheader("Select Text Input Method") | |
| input_method = st.radio("", ('Select from Examples', 'Write or Paste New Text', 'Upload File')) | |
| if input_method == 'Select from Examples': | |
| selected_text = st.selectbox('Select Text from List', example_list, index=0, key=1) | |
| input_text = st.text_area("Selected Text", selected_text, height=128, max_chars=None, key=2) | |
| elif input_method == "Write or Paste New Text": | |
| input_text = st.text_area('Write or Paste Text Below', value="", height=128, max_chars=None, key=2) | |
| else: | |
| uploaded_file = st.file_uploader("Choose a file", type=["txt", "pdf", "docx"]) | |
| if uploaded_file is not None: | |
| input_text = read_file(uploaded_file) | |
| if input_text: | |
| st.text_area("Extracted Text", input_text, height=128, max_chars=None, key=2) | |
| else: | |
| input_text = "" | |
# Build a token-classification pipeline for the selected checkpoint.
def setModel(model_checkpoint, aggregation):
    model = AutoModelForTokenClassification.from_pretrained(model_checkpoint)
    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
    return pipeline('ner', model=model, tokenizer=tokenizer, aggregation_strategy=aggregation)
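# Optional addition (not in the original app): cache loaded pipelines so a
# checkpoint is downloaded and instantiated only once per server process.
# A minimal sketch assuming Streamlit >= 1.18, which provides st.cache_resource;
# the setModel() call under the Run button below could use load_pipeline instead.
@st.cache_resource
def load_pipeline(checkpoint, aggregation):
    return setModel(checkpoint, aggregation)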
# Wrap rendered displaCy markup in a scrollable, bordered container.
def get_html(html: str):
    WRAPPER = """<div style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem">{}</div>"""
    html = html.replace("\n", " ")
    return WRAPPER.format(html)
# Merge consecutive entities that are adjacent in the text (start == previous
# end) and share the same entity group, so split words display as one span.
def entity_comb(output):
    output_comb = []
    for ind, entity in enumerate(output):
        if ind == 0:
            output_comb.append(entity)
        elif output[ind]["start"] == output[ind-1]["end"] and output[ind]["entity_group"] == output[ind-1]["entity_group"]:
            output_comb[-1]["word"] = output_comb[-1]["word"] + output[ind]["word"]
            output_comb[-1]["end"] = output[ind]["end"]
        else:
            output_comb.append(entity)
    return output_comb
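# Illustration of entity_comb with made-up values (not real pipeline output):
#   [{"word": "Ata", "entity_group": "PER", "start": 14, "end": 17},
#    {"word": "türk", "entity_group": "PER", "start": 17, "end": 21}]
# collapses into the single entity
#   [{"word": "Atatürk", "entity_group": "PER", "start": 14, "end": 21}]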
Run_Button = st.button("Run", key=None)

if Run_Button and input_text != "":
    # Run the pipeline, merge adjacent fragments, and list the entities.
    ner_pipeline = setModel(model_checkpoint, aggregation)
    output = ner_pipeline(input_text)
    output_comb = entity_comb(output)

    df = pd.DataFrame.from_dict(output_comb)
    cols_to_keep = ['word', 'entity_group', 'score', 'start', 'end']
    # Guard against texts where no entities were found (empty DataFrame).
    df_final = df[cols_to_keep] if not df.empty else df

    st.subheader("Recognized Entities")
    st.dataframe(df_final)
| st.subheader("Spacy Style Display") | |
| spacy_display = {} | |
| spacy_display["ents"] = [] | |
| spacy_display["text"] = input_text | |
| spacy_display["title"] = None | |
| for entity in output_comb: | |
| spacy_display["ents"].append({"start": entity["start"], "end": entity["end"], "label": entity["entity_group"]}) | |
| tner_entity_list = ["person", "group", "facility", "organization", "geopolitical area", "location", "product", "event", "work of art", "law", "language", "date", "time", "percent", "money", "quantity", "ordinal number", "cardinal number"] | |
| spacy_entity_list = ["PERSON", "NORP", "FAC", "ORG", "GPE", "LOC", "PRODUCT", "EVENT", "WORK_OF_ART", "LAW", "LANGUAGE", "DATE", "TIME", "PERCENT", "MONEY", "QUANTITY", "ORDINAL", "CARDINAL", "MISC"] | |
| for ent in spacy_display["ents"]: | |
| if model_checkpoint == "asahi417/tner-xlm-roberta-base-ontonotes5": | |
| ent["label"] = spacy_entity_list[tner_entity_list.index(ent["label"])] | |
| else: | |
| if ent["label"] == "PER": ent["label"] = "PERSON" | |
| html = spacy.displacy.render(spacy_display, style="ent", minify=True, manual=True, options={"ents": spacy_entity_list}) | |
| style = "<style>mark.entity { display: inline-block }</style>" | |
| st.write(f"{style}{get_html(html)}", unsafe_allow_html=True) |