test create a video
Files changed:
- app.py +50 -7
- requirements.txt +1 -0
app.py
CHANGED
@@ -21,6 +21,9 @@ import numpy as np
 from fsspec import url_to_fs
 from matplotlib import cm
 from PIL import Image
+import av
+import io
+

 repo_id = "lhoestq/turbulent_radiative_layer_tcool_demo"
 set_path = f"hf://datasets/{repo_id}/**/*.hdf5"

@@ -45,9 +48,43 @@ def get_images(path: str, scalar_field: str, trajectory: int) -> list[Image.Image]:
     out = np.uint8(cm.RdBu_r(out) * 255)
     return [Image.fromarray(img) for img in out]

+fps = 25
+# @lru_cache(maxsize=4)
+def get_video(path: str, scalar_field: str, trajectory: int) -> str:
+    video_filename = 'output_vid.webm'
+
+    out = files[path]["t0_fields"][scalar_field][trajectory]
+    out = np.log(out)  # not sure why
+    out = (out - out.min()) / (out.max() - out.min())
+    out = np.uint8(cm.RdBu_r(out) * 255)
+
+    output = av.open(video_filename, 'w')
+    stream = output.add_stream('libvpx-vp9', str(fps))
+    width, height = out[0].shape[1], out[0].shape[0]
+    stream.width = width
+    stream.height = height
+    stream.pix_fmt = 'yuv444p'  # or yuva420p
+    # stream.options = {'crf': '17'}
+
+    for img in out:
+        image = Image.fromarray(img)
+        frame = av.VideoFrame.from_image(image)
+        packet = stream.encode(frame)
+        output.mux(packet)
+
+    # Flush the encoder and close the "in memory" file:
+    packet = stream.encode(None)
+    output.mux(packet)
+    output.close()
+    return video_filename
+
+# subprocess.run(["ffmpeg", "-y", "-framerate", "25", "-i", os.path.join(output_dir, "density_%d.png"), "-c:v", "libvpx-vp9", "-pix_fmt", "yuva420p", os.path.join(output_dir, "density.webm")])
+
+
 default_scalar_fields = get_scalar_fields(paths[0])
 default_trajectories = get_trajectories(paths[0], default_scalar_fields[0])
 default_images = get_images(paths[0], default_scalar_fields[0], default_trajectories[0])
+default_video = get_video(paths[0], default_scalar_fields[0], default_trajectories[0])

 with gr.Blocks() as demo:
     gr.Markdown(f"# π HDF5 Viewer for the [{repo_id}](https://huggingface.co/datasets/{repo_id}) Dataset π")

@@ -56,35 +93,41 @@ with gr.Blocks() as demo:
         files_dropdown = gr.Dropdown(choices=paths, value=paths[0], label="File", scale=4)
         scalar_fields_dropdown = gr.Dropdown(choices=default_scalar_fields, value=default_scalar_fields[0], label="Physical field")
         trajectory_dropdown = gr.Dropdown(choices=default_trajectories, value=default_trajectories[0], label="Trajectory")
-    gallery = gr.Gallery(default_images, preview=
+    gallery = gr.Gallery(default_images, preview=False, selected_index=len(default_images) // 2)
     gr.Markdown("_Tip: click on the image to go forward or backwards_")
+    video = gr.Video(default_video)

-    @files_dropdown.select(inputs=[files_dropdown], outputs=[scalar_fields_dropdown, trajectory_dropdown, gallery])
+    @files_dropdown.select(inputs=[files_dropdown], outputs=[scalar_fields_dropdown, trajectory_dropdown, gallery, video])
     def _update_file(path: str):
         scalar_fields = get_scalar_fields(path)
         trajectories = get_trajectories(path, scalar_fields[0])
         images = get_images(path, scalar_fields[0], trajectories[0])
+        vid = get_video(path, scalar_fields[0], trajectories[0])
         yield {
             scalar_fields_dropdown: gr.Dropdown(choices=scalar_fields, value=scalar_fields[0]),
             trajectory_dropdown: gr.Dropdown(choices=trajectories, value=trajectories[0]),
-            gallery: gr.Gallery(images)
+            gallery: gr.Gallery(images),
+            video: gr.Video(vid)
         }
         yield {gallery: gr.Gallery(selected_index=len(default_images) // 2)}

-    @scalar_fields_dropdown.select(inputs=[files_dropdown, scalar_fields_dropdown], outputs=[trajectory_dropdown, gallery])
+    @scalar_fields_dropdown.select(inputs=[files_dropdown, scalar_fields_dropdown], outputs=[trajectory_dropdown, gallery, video])
     def _update_scalar_field(path: str, scalar_field: str):
         trajectories = get_trajectories(path, scalar_field)
         images = get_images(path, scalar_field, trajectories[0])
+        vid = get_video(path, scalar_field, trajectories[0])
         yield {
             trajectory_dropdown: gr.Dropdown(choices=trajectories, value=trajectories[0]),
-            gallery: gr.Gallery(images)
+            gallery: gr.Gallery(images),
+            video: gr.Video(vid)
         }
         yield {gallery: gr.Gallery(selected_index=len(default_images) // 2)}

-    @trajectory_dropdown.select(inputs=[files_dropdown, scalar_fields_dropdown, trajectory_dropdown], outputs=[gallery])
+    @trajectory_dropdown.select(inputs=[files_dropdown, scalar_fields_dropdown, trajectory_dropdown], outputs=[gallery, video])
     def _update_trajectory(path: str, scalar_field: str, trajectory: int):
         images = get_images(path, scalar_field, trajectory)
-        yield {gallery: gr.Gallery(images)}
+        vid = get_video(path, scalar_field, trajectory)
+        yield {gallery: gr.Gallery(images), video: gr.Video(vid)}
         yield {gallery: gr.Gallery(selected_index=len(default_images) // 2)}

 demo.launch()
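Note on the new get_video: the commit adds "import io" and the flush comment talks about an "in memory" file, but the function actually writes output_vid.webm to disk. A minimal sketch of the in-memory variant, assuming PyAV accepts a file-like target when an explicit container format is given; the helper name encode_frames_to_webm and its frames argument are illustrative and not part of app.py:

import io

import av
import numpy as np
from PIL import Image

def encode_frames_to_webm(frames: np.ndarray, fps: int = 25) -> bytes:
    # Hypothetical helper, not in app.py: encode uint8 frames of shape (T, H, W, C) to WebM in memory.
    buffer = io.BytesIO()
    output = av.open(buffer, 'w', format='webm')  # a file-like target needs an explicit format
    stream = output.add_stream('libvpx-vp9', rate=fps)
    stream.width = frames.shape[2]
    stream.height = frames.shape[1]
    stream.pix_fmt = 'yuv420p'
    for img in frames:
        frame = av.VideoFrame.from_image(Image.fromarray(img))
        for packet in stream.encode(frame):
            output.mux(packet)
    # Flush the encoder before closing, then read the encoded bytes back out of the buffer.
    for packet in stream.encode(None):
        output.mux(packet)
    output.close()
    return buffer.getvalue()

gr.Video expects a file path or URL rather than raw bytes, so the committed write-to-disk approach is the simpler fit here; the in-memory bytes would still have to be written out or served separately.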
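On the commented-out @lru_cache(maxsize=4): enabling it as written would be unsafe, because every call writes the same output_vid.webm, so a cache hit could return a filename whose contents were produced by a different (path, scalar_field, trajectory). A sketch of one way to cache safely, assuming a per-argument output name; get_video_cached and its naming scheme are illustrative only:

import os
from functools import lru_cache

@lru_cache(maxsize=4)
def get_video_cached(path: str, scalar_field: str, trajectory: int) -> str:
    # Hypothetical wrapper around the committed get_video: move each result to a name
    # derived from the arguments so cached entries keep pointing at the right video.
    video_filename = f"vid_{abs(hash((path, scalar_field, trajectory)))}.webm"
    os.replace(get_video(path, scalar_field, trajectory), video_filename)
    return video_filename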
requirements.txt
CHANGED
@@ -2,3 +2,4 @@ h5py
 huggingface_hub
 Pillow
 numpy
+av