Update app.py
app.py CHANGED
```diff
@@ -11,6 +11,7 @@ download_datasets_from_github(os.getcwd())
 
 dataset_items = {dataset.name: dataset for dataset in ABSADatasetList()}
 
+
 def get_example(dataset):
     task = 'apc'
     dataset_file = detect_infer_dataset(dataset_items[dataset], task)
```
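For context: `get_example` turns a dataset name into a list of example sentences, and the `dataset_dict` indexed later in `perform_inference` is presumably built by mapping every dataset name through it. The function body is cut off in this hunk, so the following is only a sketch of that pattern, assuming `detect_infer_dataset` returns the path(s) of plain-text inference files with one example per line:

```python
# Hypothetical reconstruction -- the real body of get_example is not
# shown in this diff, so the file handling below is an assumption.
def get_example(dataset):
    task = 'apc'  # as in the diff
    dataset_file = detect_infer_dataset(dataset_items[dataset], task)
    # Assumption: one or more plain-text files, one example per line.
    files = dataset_file if isinstance(dataset_file, list) else [dataset_file]
    examples = []
    for path in files:
        with open(path, encoding='utf-8') as fin:
            examples.extend(line.strip() for line in fin if line.strip())
    return examples

# dataset_dict maps each dataset name to its example sentences; this is
# what perform_inference samples from when the textbox is left empty.
dataset_dict = {name: get_example(name) for name in dataset_items}
```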
```diff
@@ -36,7 +37,7 @@ aspect_extractor = ATEPCCheckpointManager.get_aspect_extractor(checkpoint='multi
 
 def perform_inference(text, dataset):
     if not text:
-        text = dataset_dict[dataset][random.randint(0, len(dataset_dict[dataset])-1)]
+        text = dataset_dict[dataset][random.randint(0, len(dataset_dict[dataset]) - 1)]
 
     result = aspect_extractor.extract_aspect(inference_source=[text],
                                              pred_sentiment=True)
```
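Functionally, the touched line is a "pick a random example when the textbox is empty" fallback; the commit only adjusts its spacing. An equivalent, slightly tidier form uses `random.choice` (a sketch; how `result` is consumed after the call is not shown in this diff):

```python
import random

def perform_inference(text, dataset):
    # Fall back to a random example from the selected dataset when the
    # user submits an empty textbox. random.choice(seq) is equivalent to
    # seq[random.randint(0, len(seq) - 1)].
    if not text:
        text = random.choice(dataset_dict[dataset])

    # Call as shown in the diff: one input sentence, with sentiment
    # prediction enabled for the extracted aspects.
    result = aspect_extractor.extract_aspect(inference_source=[text],
                                             pred_sentiment=True)
    return result
```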
```diff
@@ -56,17 +57,18 @@ demo = gr.Blocks()
 
 with demo:
     gr.Markdown("# <p align='center'>Multilingual Aspect-based Sentiment Analysis !</p>")
-    gr.Markdown("### Repo: [PyABSA](https://github.com/yangheng95/PyABSA)
-
+    gr.Markdown("""### Repo: [PyABSA](https://github.com/yangheng95/PyABSA)
+    ### Author: [Heng Yang](https://github.com/yangheng95) (杨恒)
     [![Downloads](https://pepy.tech/badge/pyabsa)](https://pepy.tech/project/pyabsa)
     [![Downloads](https://pepy.tech/badge/pyabsa/month)](https://pepy.tech/project/pyabsa)
     """
     )
     gr.Markdown("Your input text should be no more than 80 words, that's the longest text we used in training. However, you can try longer text in self-training ")
+    gr.Markdown("**You don't need to split Chinese (Korean, etc.) text into tokens as in the provided examples; just input natural-language text.**")
     output_dfs = []
     with gr.Row():
         with gr.Column():
-            input_sentence = gr.Textbox(placeholder='Leave blank
+            input_sentence = gr.Textbox(placeholder='Leave this box blank and choose a dataset to get a random example...', label="Example:")
         gr.Markdown("You can find the datasets at [github.com/yangheng95/ABSADatasets](https://github.com/yangheng95/ABSADatasets/tree/v1.2/datasets/text_classification)")
         dataset_ids = gr.Radio(choices=[dataset.name for dataset in ABSADatasetList()[:-1]], value='Laptop14', label="Datasets")
         inference_button = gr.Button("Let's go!")
```
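The hunk ends with the input column, and the event wiring does not appear in this diff. In a `gr.Blocks` app these components are typically connected roughly as below; the `gr.Dataframe` output and the exact `inputs`/`outputs` lists are assumptions, not part of this commit (though `output_dfs = []` above suggests DataFrame outputs are collected somewhere):

```python
# Hypothetical wiring sketch -- not shown in this diff.
with demo:
    # ... the Markdown headers and input column shown above ...

    # Assumption: predictions are rendered as a table.
    output_df = gr.Dataframe(label="Prediction Results")
    output_dfs.append(output_df)

    # Clicking the button runs perform_inference on the textbox contents
    # (or a random example) for the selected dataset.
    inference_button.click(fn=perform_inference,
                           inputs=[input_sentence, dataset_ids],
                           outputs=output_df)

demo.launch()
```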