diff --git a/README.md b/README.md
index d0d335d5ad4679da27a8d2d8d1f19aea3bc0421c..9be40fde46b0a6473888c07c7dbc5af4e2787ce4 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,11 @@
---
-title: Iframe Test
-emoji: 🐠
-colorFrom: red
-colorTo: red
+title: iFrame Resizer Test
+emoji: ⚡
+colorFrom: green
+colorTo: green
sdk: gradio
-sdk_version: 2.9.1
-app_file: app.py
-pinned: false
+python_version: 3.10.4
+app_file: start.py
+models: [osanseviero/BigGAN-deep-128, t5-small]
+datasets: [emotion]
---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/modules/app.py b/modules/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..47844882f87cc97181a32fb38afa7b3c9ba3562b
--- /dev/null
+++ b/modules/app.py
@@ -0,0 +1,51 @@
+import os
+import requests
+import json
+from io import BytesIO
+
+from fastapi import FastAPI
+from fastapi.staticfiles import StaticFiles
+from fastapi.responses import FileResponse, StreamingResponse
+
+from modules.inference import infer_t5
+from modules.dataset import query_emotion
+
+# https://huggingface.co/settings/tokens
+# https://huggingface.co/spaces/{username}/{space}/settings
+API_TOKEN = os.getenv("BIG_GAN_TOKEN")
+
+app = FastAPI(docs_url=None, redoc_url=None)
+
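+# Serve the front-end assets (index.html, index.js, style.css) under /static.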
+app.mount("/static", StaticFiles(directory="static"), name="static")
+
+
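+# Serve the front end at "/" (a HEAD handler is registered as well).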
+@app.head("/")
+@app.get("/")
+def index() -> FileResponse:
+    return FileResponse(path="static/index.html", media_type="text/html")
+
+
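+# Proxy the request to the Hugging Face Inference API for BigGAN and stream the
+# returned PNG back to the browser, e.g. GET /infer_biggan?input=tiger
+# ("tiger" is an illustrative ImageNet class name).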
+@app.get("/infer_biggan")
+def biggan(input):
+    output = requests.request(
+        "POST",
+        "https://api-inference.huggingface.co/models/osanseviero/BigGAN-deep-128",
+        headers={"Authorization": f"Bearer {API_TOKEN}"},
+        data=json.dumps(input),
+    )
+
+    return StreamingResponse(BytesIO(output.content), media_type="image/png")
+
+
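+# Run seq2seq generation with the locally loaded t5-small model,
+# e.g. GET /infer_t5?input=translate English to German: Hello (illustrative input).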
+@app.get("/infer_t5")
+def t5(input):
+    output = infer_t5(input)
+
+    return {"output": output}
+
+
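+# Return rows [start, end) of the emotion dataset as {"text", "emotion"} records,
+# e.g. GET /query_emotion?start=0&end=5.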
+@app.get("/query_emotion")
+def emotion(start, end):
+    output = query_emotion(int(start), int(end))
+
+    return {"output": output}
diff --git a/modules/dataset.py b/modules/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..26d9108c537d6fbb2b054e23bc169e1c4fd2aa07
--- /dev/null
+++ b/modules/dataset.py
@@ -0,0 +1,19 @@
+from datasets import load_dataset
+
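+# Load the train split of the emotion dataset once at import time (16,000 rows; see LIMIT in static/index.js).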
+dataset = load_dataset("emotion", split="train")
+
+emotions = dataset.info.features["label"].names
+
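+# Return rows [start, end) as {"text", "emotion"} dicts, mapping integer labels to their label names.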
+def query_emotion(start, end):
+    rows = dataset[start:end]
+    texts, labels = rows["text"], rows["label"]
+
+    observations = []
+
+    for i, text in enumerate(texts):
+        observations.append({
+            "text": text,
+            "emotion": emotions[labels[i]],
+        })
+
+    return observations
diff --git a/modules/inference.py b/modules/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbf5cce09c4dd0844bb300e7afb161a15f7b0149
--- /dev/null
+++ b/modules/inference.py
@@ -0,0 +1,11 @@
+from transformers import T5Tokenizer, T5ForConditionalGeneration
+
+tokenizer = T5Tokenizer.from_pretrained("t5-small")
+model = T5ForConditionalGeneration.from_pretrained("t5-small")
+
+
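+# Tokenize the input, generate with t5-small, and decode the first output sequence.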
+def infer_t5(input):
+    input_ids = tokenizer(input, return_tensors="pt").input_ids
+    outputs = model.generate(input_ids)
+
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a5ef1889f75dd1a4c3d5368400557c03b7cdfc3a
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,7 @@
+datasets==2.*
+fastapi==0.74.*
+requests==2.27.*
+sentencepiece==0.1.*
+torch==1.11.*
+transformers==4.*
+uvicorn[standard]==0.17.*
diff --git a/start.py b/start.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5b9217e0ef966bb596f57a8ec32839f7cd3eafe
--- /dev/null
+++ b/start.py
@@ -0,0 +1,3 @@
+import subprocess
+
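+# Launch the FastAPI app with uvicorn on port 7860, the port Hugging Face Spaces expects.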
+subprocess.run("uvicorn modules.app:app --host 0.0.0.0 --port 7860", shell=True)
diff --git a/static/index.html b/static/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..43fe1544893bcc2e40b8313872df74a8e90d4f59
--- /dev/null
+++ b/static/index.html
@@ -0,0 +1,1919 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1" />
+    <title>Fast API 🤗 Space served with Uvicorn</title>
+    <link rel="stylesheet" href="static/style.css" />
+    <script src="static/index.js" defer></script>
+  </head>
+  <body>
+    <!-- Markup reconstructed from the page text and the selectors used in static/index.js and static/style.css; the select options are illustrative ImageNet class names. -->
+    <main>
+      <h1>Fast API 🤗 Space served with Uvicorn</h1>
+      <section id="image-gen">
+        <h2>Image generation from Inference API</h2>
+        <p>
+          Model:
+          <a href="https://huggingface.co/osanseviero/BigGAN-deep-128">osanseviero/BigGAN-deep-128</a>
+        </p>
+        <select id="image-gen-input">
+          <option value="tiger">tiger</option>
+          <option value="goldfish">goldfish</option>
+        </select>
+        <img class="image-gen-output" width="256" height="256" alt="" />
+      </section>
+      <section id="text-gen">
+        <h2>Text generation from transformers library</h2>
+        <p>
+          Model:
+          <a href="https://huggingface.co/t5-small">t5-small</a>
+        </p>
+        <form class="text-gen-form">
+          <input id="text-gen-input" type="text" />
+          <button type="submit">Submit</button>
+          <p class="text-gen-output"></p>
+        </form>
+      </section>
+      <section id="dataset">
+        <h2>Dataset from datasets library</h2>
+        <p>
+          Dataset:
+          <a href="https://huggingface.co/datasets/emotion">emotion</a>
+        </p>
+        <table class="dataset-output"></table>
+        <div>
+          <button class="table-previous hidden">Previous</button>
+          <button class="table-next">Next</button>
+        </div>
+      </section>
+    </main>
+  </body>
+</html>
diff --git a/static/index.js b/static/index.js
new file mode 100644
index 0000000000000000000000000000000000000000..da58d658fda06c7aed1a8384db3cd19e5f8f7a3e
--- /dev/null
+++ b/static/index.js
@@ -0,0 +1,126 @@
+if (document.location.search.includes('dark-theme=true')) {
+  document.body.classList.add('dark-theme');
+}
+
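+// Dataset pagination state: show RANGE rows per page out of the 16,000-row train split.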
+let cursor = 0;
+const RANGE = 5;
+const LIMIT = 16_000;
+
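+// Helpers that call the FastAPI endpoints defined in modules/app.py.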
+const textToImage = async (text) => {
+  const inferenceResponse = await fetch(`infer_biggan?input=${text}`);
+  const inferenceBlob = await inferenceResponse.blob();
+
+  return URL.createObjectURL(inferenceBlob);
+};
+
+const translateText = async (text) => {
+  const inferResponse = await fetch(`infer_t5?input=${text}`);
+  const inferJson = await inferResponse.json();
+
+  return inferJson.output;
+};
+
+const queryDataset = async (start, end) => {
+  const queryResponse = await fetch(`query_emotion?start=${start}&end=${end}`);
+  const queryJson = await queryResponse.json();
+
+  return queryJson.output;
+};
+
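+// Rebuild the dataset table with rows [cursor, cursor + range).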
+const updateTable = async (cursor, range = RANGE) => {
+  const table = document.querySelector('.dataset-output');
+
+  const fragment = new DocumentFragment();
+
+  const observations = await queryDataset(cursor, cursor + range);
+
+  for (const observation of observations) {
+    let row = document.createElement('tr');
+    let text = document.createElement('td');
+    let emotion = document.createElement('td');
+
+    text.textContent = observation.text;
+    emotion.textContent = observation.emotion;
+
+    row.appendChild(text);
+    row.appendChild(emotion);
+    fragment.appendChild(row);
+  }
+
+  table.innerHTML = '';
+
+  table.appendChild(fragment);
+
+  table.insertAdjacentHTML(
+    'afterbegin',
+    `<thead>
+      <tr>
+        <td>text</td>
+        <td>emotion</td>
+      </tr>
+    </thead>`
+  );
+};
+
+const imageGenSelect = document.getElementById('image-gen-input');
+const imageGenImage = document.querySelector('.image-gen-output');
+const textGenForm = document.querySelector('.text-gen-form');
+const tableButtonPrev = document.querySelector('.table-previous');
+const tableButtonNext = document.querySelector('.table-next');
+
+imageGenSelect.addEventListener('change', async (event) => {
+  const value = event.target.value;
+
+  try {
+    imageGenImage.src = await textToImage(value);
+    imageGenImage.alt = value + ' generated from BigGAN AI model';
+  } catch (err) {
+    console.error(err);
+  }
+});
+
+textGenForm.addEventListener('submit', async (event) => {
+  event.preventDefault();
+
+  const textGenInput = document.getElementById('text-gen-input');
+  const textGenParagraph = document.querySelector('.text-gen-output');
+
+  try {
+    textGenParagraph.textContent = await translateText(textGenInput.value);
+  } catch (err) {
+    console.error(err);
+  }
+});
+
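+// Pagination buttons: move the cursor by RANGE and toggle button visibility at either end.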
+tableButtonPrev.addEventListener('click', () => {
+  cursor = cursor > RANGE ? cursor - RANGE : 0;
+
+  if (cursor < RANGE) {
+    tableButtonPrev.classList.add('hidden');
+  }
+  if (cursor < LIMIT - RANGE) {
+    tableButtonNext.classList.remove('hidden');
+  }
+
+  updateTable(cursor);
+});
+
+tableButtonNext.addEventListener('click', () => {
+  cursor = cursor < LIMIT - RANGE ? cursor + RANGE : cursor;
+
+  if (cursor >= RANGE) {
+    tableButtonPrev.classList.remove('hidden');
+  }
+  if (cursor >= LIMIT - RANGE) {
+    tableButtonNext.classList.add('hidden');
+  }
+
+  updateTable(cursor);
+});
+
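+// Initial render: generate an image for the default select value and load the first page of the dataset.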
+textToImage(imageGenSelect.value)
+  .then((image) => (imageGenImage.src = image))
+  .catch(console.error);
+
+updateTable(cursor)
+  .catch(console.error);
diff --git a/static/style.css b/static/style.css
new file mode 100644
index 0000000000000000000000000000000000000000..6a3c98f8fab848caaaf7b844b24ce23c8c5c8dde
--- /dev/null
+++ b/static/style.css
@@ -0,0 +1,79 @@
+body {
+  --text: hsl(0 0% 15%);
+  padding: 2.5rem;
+  font-family: sans-serif;
+  color: var(--text);
+}
+body.dark-theme {
+  --text: hsl(0 0% 90%);
+  background-color: hsl(223 39% 7%);
+}
+
+main {
+  max-width: 80rem;
+  text-align: center;
+}
+
+section {
+  display: flex;
+  flex-direction: column;
+  align-items: center;
+}
+
+a {
+  color: var(--text);
+}
+
+select, input, button, .text-gen-output {
+  padding: 0.5rem 1rem;
+}
+
+select, img, input {
+  margin: 0.5rem auto 1rem;
+}
+
+form {
+  width: 25rem;
+  margin: 0 auto;
+}
+
+input {
+  width: 70%;
+}
+
+button {
+  cursor: pointer;
+}
+
+.text-gen-output {
+  min-height: 1.2rem;
+  margin: 1rem;
+  border: 0.5px solid grey;
+}
+
+#dataset button {
+  width: 6rem;
+  margin: 0.5rem;
+}
+
+#dataset button.hidden {
+  visibility: hidden;
+}
+
+table {
+  max-width: 40rem;
+  text-align: left;
+  border-collapse: collapse;
+}
+
+thead {
+  font-weight: bold;
+}
+
+td {
+  padding: 0.5rem;
+}
+
+td:not(thead td) {
+  border: 0.5px solid grey;
+}