Spaces:
Runtime error
Runtime error
Commit
·
8b842e0
1
Parent(s):
b5bd188
Remove sidebar
Browse files- app.py +10 -12
- sections/caveats.md +1 -1
- sections/references.md +15 -0
app.py
CHANGED
|
@@ -54,15 +54,6 @@ st.set_page_config(
|
|
| 54 |
st.title("Multilingual Visual Question Answering")
|
| 55 |
st.write("[Gunjan Chhablani](https://huggingface.co/gchhablani), [Bhavitvya Malik](https://huggingface.co/bhavitvyamalik)")
|
| 56 |
|
| 57 |
-
|
| 58 |
-
st.sidebar.write(read_markdown("about.md"))
|
| 59 |
-
st.sidebar.write(read_markdown("caveats.md"))
|
| 60 |
-
st.sidebar.write(read_markdown("challenges.md"))
|
| 61 |
-
st.sidebar.write(read_markdown("social_impact.md"))
|
| 62 |
-
st.sidebar.write(read_markdown("checkpoints.md"))
|
| 63 |
-
st.sidebar.write(read_markdown("acknowledgements.md"))
|
| 64 |
-
|
| 65 |
-
|
| 66 |
with st.beta_expander("Usage"):
|
| 67 |
st.markdown(read_markdown("usage.md"))
|
| 68 |
|
|
@@ -71,8 +62,6 @@ with st.beta_expander("Method"):
|
|
| 71 |
st.markdown(read_markdown("pretraining.md"))
|
| 72 |
st.markdown(read_markdown("finetuning.md"))
|
| 73 |
|
| 74 |
-
|
| 75 |
-
|
| 76 |
first_index = 20
|
| 77 |
# Init Session State
|
| 78 |
if state.image_file is None:
|
|
@@ -130,4 +119,13 @@ logits = softmax(logits)
|
|
| 130 |
labels, values = get_top_5_predictions(logits, answer_reverse_mapping)
|
| 131 |
translated_labels = translate_labels(labels, state.answer_lang_id)
|
| 132 |
fig = plotly_express_horizontal_bar_plot(values, translated_labels)
|
| 133 |
-
st.plotly_chart(fig, use_container_width = True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 54 |
st.title("Multilingual Visual Question Answering")
|
| 55 |
st.write("[Gunjan Chhablani](https://huggingface.co/gchhablani), [Bhavitvya Malik](https://huggingface.co/bhavitvyamalik)")
|
| 56 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 57 |
with st.beta_expander("Usage"):
|
| 58 |
st.markdown(read_markdown("usage.md"))
|
| 59 |
|
|
|
|
| 62 |
st.markdown(read_markdown("pretraining.md"))
|
| 63 |
st.markdown(read_markdown("finetuning.md"))
|
| 64 |
|
|
|
|
|
|
|
| 65 |
first_index = 20
|
| 66 |
# Init Session State
|
| 67 |
if state.image_file is None:
|
|
|
|
| 119 |
labels, values = get_top_5_predictions(logits, answer_reverse_mapping)
|
| 120 |
translated_labels = translate_labels(labels, state.answer_lang_id)
|
| 121 |
fig = plotly_express_horizontal_bar_plot(values, translated_labels)
|
| 122 |
+
st.plotly_chart(fig, use_container_width = True)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
st.write(read_markdown("about.md"))
|
| 126 |
+
st.write(read_markdown("caveats.md"))
|
| 127 |
+
st.write(read_markdown("challenges.md"))
|
| 128 |
+
st.write(read_markdown("social_impact.md"))
|
| 129 |
+
st.write(read_markdown("references.md"))
|
| 130 |
+
st.write(read_markdown("checkpoints.md"))
|
| 131 |
+
st.write(read_markdown("acknowledgements.md"))
|
sections/caveats.md
CHANGED
|
@@ -1 +1 @@
|
|
| 1 |
-
**
|
|
|
|
| 1 |
+
**Caveats**: The best fine-tuned model only achieves 0.49 accuracy on the multilingual validation data that we create. This could be because of low-quality translations, sub-optimal hyperparameters, and lack of ample training. In the future, we hope to improve this model by addressing these concerns.
|
sections/references.md
CHANGED
|
@@ -1 +1,16 @@
|
|
| 1 |
# References
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
# References
|
| 2 |
+
- [Conceptual 12M Dataset](https://github.com/google-research-datasets/conceptual-12m)
|
| 3 |
+
|
| 4 |
+
- [VQA v2 Dataset](https://visualqa.org/challenge.html)
|
| 5 |
+
|
| 6 |
+
- [Hybrid CLIP Example](https://github.com/huggingface/transformers/tree/master/examples/research_projects/jax-projects/hybrid_clip)
|
| 7 |
+
|
| 8 |
+
- [VisualBERT Modeling File](https://github.com/huggingface/transformers/blob/master/src/transformers/models/visual_bert/modeling_visual_bert.py)
|
| 9 |
+
|
| 10 |
+
- [BERT Modeling File](https://github.com/huggingface/transformers/blob/master/src/transformers/models/bert/modeling_flax_bert.py)
|
| 11 |
+
|
| 12 |
+
- [CLIP Modeling File](https://github.com/huggingface/transformers/blob/master/src/transformers/models/clip/modeling_flax_clip.py)
|
| 13 |
+
|
| 14 |
+
- [Summarization Training Script](https://github.com/huggingface/transformers/blob/master/examples/flax/summarization/run_summarization_flax.py)
|
| 15 |
+
|
| 16 |
+
- [MLM Training Script](https://github.com/huggingface/transformers/blob/2df63282e010ac518683252d8ddba21e58d2faf3/examples/flax/language-modeling/run_mlm_flax.py)
|