import gradio as gr


def get_process_config():
    return {
        "process.numactl": gr.Checkbox(
            value=False,
            label="process.numactl",
            info="Runs the model with numactl",
        ),
        "process.numactl_kwargs": gr.Textbox(
            label="process.numactl_kwargs",
            value="{'cpunodebind': 0, 'membind': 0}",
            info="Additional Python dict of kwargs to pass to numactl",
        ),
    }
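

# Note: "process.numactl_kwargs" is captured as a string holding a Python-dict
# literal, so whatever consumes it has to parse that string first. A minimal
# sketch of such parsing (the parse_numactl_kwargs helper is illustrative, not
# part of the original Space):
import ast


def parse_numactl_kwargs(raw: str) -> dict:
    # ast.literal_eval safely evaluates literals such as
    # "{'cpunodebind': 0, 'membind': 0}" without eval()'s code-execution risk.
    kwargs = ast.literal_eval(raw) if raw.strip() else {}
    if not isinstance(kwargs, dict):
        raise ValueError(f"expected a dict literal, got {raw!r}")
    return kwargs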


def get_inference_config():
    return {
        "inference.warmup_runs": gr.Slider(
            step=1,
            value=10,
            minimum=0,
            maximum=10,
            label="inference.warmup_runs",
            info="Number of warmup runs",
        ),
        "inference.duration": gr.Slider(
            step=1,
            value=10,
            minimum=0,
            maximum=10,
            label="inference.duration",
            info="Minimum duration of the benchmark in seconds",
        ),
        "inference.iterations": gr.Slider(
            step=1,
            value=10,
            minimum=0,
            maximum=10,
            label="inference.iterations",
            info="Minimum number of iterations of the benchmark",
        ),
        "inference.latency": gr.Checkbox(
            value=True,
            label="inference.latency",
            info="Measures the latency of the model",
        ),
        "inference.memory": gr.Checkbox(
            value=False,
            label="inference.memory",
            info="Measures the peak memory consumption",
        ),
    }


def get_pytorch_config():
    return {
        "pytorch.torch_dtype": gr.Dropdown(
            value="float32",
            label="pytorch.torch_dtype",
            choices=["bfloat16", "float16", "float32", "auto"],
            info="The dtype to use for the model",
        ),
        "pytorch.torch_compile": gr.Checkbox(
            value=False,
            label="pytorch.torch_compile",
            info="Compiles the model with torch.compile",
        ),
    }


def get_onnxruntime_config():
    return {
        "onnxruntime.export": gr.Checkbox(
            value=True,
            label="onnxruntime.export",
            info="Exports the model to ONNX",
        ),
        "onnxruntime.use_cache": gr.Checkbox(
            value=True,
            label="onnxruntime.use_cache",
            info="Uses the past key/values (KV) cache",
        ),
        "onnxruntime.use_merged": gr.Checkbox(
            value=True,
            label="onnxruntime.use_merged",
            info="Uses a merged decoder ONNX model if available",
        ),
        "onnxruntime.torch_dtype": gr.Dropdown(
            value="float32",
            label="onnxruntime.torch_dtype",
            choices=["bfloat16", "float16", "float32", "auto"],
            info="The dtype to use for the model",
        ),
    }


def get_openvino_config():
    return {
        "openvino.export": gr.Checkbox(
            value=True,
            label="openvino.export",
            info="Exports the model to OpenVINO",
        ),
        "openvino.use_cache": gr.Checkbox(
            value=True,
            label="openvino.use_cache",
            info="Uses the past key/values (KV) cache",
        ),
        "openvino.use_merged": gr.Checkbox(
            value=True,
            label="openvino.use_merged",
            info="Uses a merged decoder model if available",
        ),
        "openvino.reshape": gr.Checkbox(
            value=False,
            label="openvino.reshape",
            info="Reshapes the model to static input shapes",
        ),
        "openvino.half": gr.Checkbox(
            value=False,
            label="openvino.half",
            info="Converts the model to half precision (FP16)",
        ),
    }


def get_ipex_config():
    # The IPEX backend currently exposes no extra UI options.
    return {}
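

# For context: each builder above returns a flat dict mapping dotted config keys
# to Gradio components. A minimal sketch of how they might be rendered together
# (this Blocks assembly is illustrative and assumes a simple single-page layout,
# not necessarily how the original Space wires things up):
if __name__ == "__main__":
    with gr.Blocks() as demo:
        # Components instantiated inside the Blocks context are added to the UI.
        components = {**get_process_config(), **get_inference_config()}
        components.update(get_pytorch_config())
    demo.launch()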