"""Convert Hugging Face models to ONNX format.

This application provides a Streamlit interface for converting Hugging Face models
to ONNX format using the Transformers.js conversion scripts. It handles:
- Model conversion with optional trust_remote_code and output_attentions
- Automatic task inference with fallback support
- README generation with merged metadata from the original model
- Upload to Hugging Face Hub
"""

import logging
import os
import re
import shutil
import subprocess
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Tuple

import streamlit as st
import yaml
from huggingface_hub import HfApi, hf_hub_download, model_info, whoami

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


@dataclass
class Config:
    """Application configuration containing authentication and path settings.

    Attributes:
        hf_token: Hugging Face API token (user token takes precedence over system token)
        hf_username: Hugging Face username associated with the token
        is_using_user_token: True if using a user-provided token, False if using system token
        hf_base_url: Base URL for Hugging Face Hub
        repo_path: Path to the bundled transformers.js repository
    """

    hf_token: str
    hf_username: str
    is_using_user_token: bool
    hf_base_url: str = "https://huggingface.co"
    repo_path: Path = Path("./transformers.js")

    @classmethod
    def from_env(cls) -> "Config":
        """Create configuration from environment variables and Streamlit session state.

        Priority order for tokens:
        1. User-provided token from Streamlit session (st.session_state.user_hf_token)
        2. System token from environment variable (HF_TOKEN)

        Returns:
            Config: Initialized configuration object

        Raises:
            ValueError: If no valid token is available
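
        Example:
            In a Space with HF_TOKEN set and no user token in the session,
            from_env() uses the system token and takes the username from
            SPACE_AUTHOR_NAME, falling back to whoami() on that token.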
        """
        system_token = os.getenv("HF_TOKEN")
        user_token = st.session_state.get("user_hf_token")

        # Determine username based on which token is being used
        if user_token:
            hf_username = whoami(token=user_token)["name"]
        else:
            hf_username = (
                os.getenv("SPACE_AUTHOR_NAME") or whoami(token=system_token)["name"]
            )

        # User token takes precedence over system token
        hf_token = user_token or system_token

        if not hf_token:
            raise ValueError(
                "When the user token is not provided, the system token must be set."
            )

        return cls(
            hf_token=hf_token,
            hf_username=hf_username,
            is_using_user_token=bool(user_token),
        )


class ModelConverter:
    """Handles model conversion to ONNX format and upload to Hugging Face Hub.

    This class manages the entire conversion workflow:
    1. Fetching original model metadata and README
    2. Running the ONNX conversion subprocess
    3. Generating an enhanced README with merged metadata
    4. Uploading the converted model to Hugging Face Hub

    Attributes:
        config: Application configuration containing tokens and paths
        api: Hugging Face API client for repository operations
    """

    def __init__(self, config: Config):
        """Initialize the converter with configuration.

        Args:
            config: Application configuration object
        """
        self.config = config
        self.api = HfApi(token=config.hf_token)

    # ============================================================================
    # README Processing Methods
    # ============================================================================

    def _fetch_original_readme(self, repo_id: str) -> str:
        """Download the README from the original model repository.

        Args:
            repo_id: Hugging Face model repository ID (e.g., 'username/model-name')

        Returns:
            str: Content of the README file, or empty string if not found
        """
        try:
            readme_path = hf_hub_download(
                repo_id=repo_id, filename="README.md", token=self.config.hf_token
            )
            with open(readme_path, "r", encoding="utf-8", errors="ignore") as f:
                return f.read()
        except Exception:
            # Silently fail if README doesn't exist or can't be downloaded
            return ""

    def _strip_yaml_frontmatter(self, text: str) -> str:
        """Remove YAML frontmatter from text, returning only the body.

        YAML frontmatter is delimited by '---' at the start and end.

        Args:
            text: Text that may contain YAML frontmatter

        Returns:
            str: Text with frontmatter removed, or original text if no frontmatter found
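
        Example:
            '---\\nlicense: mit\\n---\\nBody' -> 'Body'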
        """
        if not text:
            return ""
        if text.startswith("---"):
            match = re.match(r"^---[\s\S]*?\n---\s*\n", text)
            if match:
                return text[match.end() :]
        return text

    def _extract_yaml_frontmatter(self, text: str) -> Tuple[dict, str]:
        """Parse and extract YAML frontmatter from text.

        Args:
            text: Text that may contain YAML frontmatter

        Returns:
            Tuple containing:
            - dict: Parsed YAML frontmatter as a dictionary (empty dict if none found)
            - str: Remaining body text after the frontmatter
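
        Example:
            '---\\nlicense: mit\\n---\\nBody' -> ({'license': 'mit'}, 'Body')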
        """
        if not text or not text.startswith("---"):
            return {}, text or ""

        # Match YAML frontmatter pattern: ---\n...content...\n---\n
        match = re.match(r"^---\s*\n([\s\S]*?)\n---\s*\n", text)
        if not match:
            return {}, text

        frontmatter_text = match.group(1)
        body = text[match.end() :]

        # Parse YAML safely, returning empty dict on any error
        try:
            parsed_data = yaml.safe_load(frontmatter_text)
            if not isinstance(parsed_data, dict):
                parsed_data = {}
        except Exception:
            parsed_data = {}

        return parsed_data, body

    def _get_pipeline_docs_url(self, pipeline_tag: Optional[str]) -> str:
        """Generate Transformers.js documentation URL for a given pipeline tag.

        Args:
            pipeline_tag: Hugging Face pipeline tag (e.g., 'text-generation')

        Returns:
            str: URL to the relevant Transformers.js pipeline documentation
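
        Example:
            'text-generation' ->
            'https://huggingface.co/docs/transformers.js/api/pipelines#module_pipelines.TextGenerationPipeline'
            Unknown or missing tags return the base pipelines URL.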
        """
        base_url = "https://huggingface.co/docs/transformers.js/api/pipelines"

        if not pipeline_tag:
            return base_url

        # Map Hugging Face pipeline tags to Transformers.js pipeline class names
        pipeline_class_mapping = {
            "text-classification": "TextClassificationPipeline",
            "token-classification": "TokenClassificationPipeline",
            "question-answering": "QuestionAnsweringPipeline",
            "fill-mask": "FillMaskPipeline",
            "text2text-generation": "Text2TextGenerationPipeline",
            "summarization": "SummarizationPipeline",
            "translation": "TranslationPipeline",
            "text-generation": "TextGenerationPipeline",
            "zero-shot-classification": "ZeroShotClassificationPipeline",
            "feature-extraction": "FeatureExtractionPipeline",
            "image-feature-extraction": "ImageFeatureExtractionPipeline",
            "audio-classification": "AudioClassificationPipeline",
            "zero-shot-audio-classification": "ZeroShotAudioClassificationPipeline",
            "automatic-speech-recognition": "AutomaticSpeechRecognitionPipeline",
            "image-to-text": "ImageToTextPipeline",
            "image-classification": "ImageClassificationPipeline",
            "image-segmentation": "ImageSegmentationPipeline",
            "background-removal": "BackgroundRemovalPipeline",
            "zero-shot-image-classification": "ZeroShotImageClassificationPipeline",
            "object-detection": "ObjectDetectionPipeline",
            "zero-shot-object-detection": "ZeroShotObjectDetectionPipeline",
            "document-question-answering": "DocumentQuestionAnsweringPipeline",
            "text-to-audio": "TextToAudioPipeline",
            "image-to-image": "ImageToImagePipeline",
            "depth-estimation": "DepthEstimationPipeline",
        }

        pipeline_class = pipeline_class_mapping.get(pipeline_tag)
        if not pipeline_class:
            return base_url

        return f"{base_url}#module_pipelines.{pipeline_class}"

    def _normalize_pipeline_tag(self, pipeline_tag: Optional[str]) -> Optional[str]:
        """Normalize pipeline tag to match expected task names.

        Some pipeline tags use abbreviations that need to be expanded
        for the conversion script to recognize them.

        Args:
            pipeline_tag: Original pipeline tag from model metadata

        Returns:
            Optional[str]: Normalized task name, or None if input is None
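
        Example:
            'vqa' -> 'visual-question-answering'; other tags pass through unchanged.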
        """
        if not pipeline_tag:
            return None

        # Map abbreviated tags to their full names
        tag_synonyms = {
            "vqa": "visual-question-answering",
        }

        return tag_synonyms.get(pipeline_tag, pipeline_tag)

    # ============================================================================
    # Model Conversion Methods
    # ============================================================================

    def setup_repository(self) -> None:
        """Verify that the transformers.js repository exists.

        Raises:
            RuntimeError: If the repository is not found at the expected path
        """
        if not self.config.repo_path.exists():
            raise RuntimeError(
                f"Expected transformers.js repository at {self.config.repo_path} "
                f"but it was not found."
            )

    def _run_conversion_subprocess(
        self, input_model_id: str, extra_args: Optional[List[str]] = None
    ) -> subprocess.CompletedProcess:
        """Execute the ONNX conversion script as a subprocess.

        Args:
            input_model_id: Hugging Face model ID to convert
            extra_args: Additional command-line arguments for the conversion script

        Returns:
            subprocess.CompletedProcess: Result of the subprocess execution
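
        Example:
            For input_model_id='EleutherAI/pythia-14m' with no extra_args, the
            command run inside the transformers.js checkout is roughly:
            python -m scripts.convert --quantize --model_id EleutherAI/pythia-14m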
        """
        # Build the conversion command
        command = [
            sys.executable,
            "-m",
            "scripts.convert",
            "--quantize",
            "--model_id",
            input_model_id,
        ]

        if extra_args:
            command.extend(extra_args)

        # Run conversion in the transformers.js repository directory
        return subprocess.run(
            command,
            cwd=self.config.repo_path,
            capture_output=True,
            text=True,
            env={
                # Pass the parent environment through so the child process
                # keeps PATH, HOME, etc.; only HF_TOKEN is added/overridden.
                **os.environ,
                "HF_TOKEN": self.config.hf_token,
            },
        )

    def convert_model(
        self,
        input_model_id: str,
        trust_remote_code: bool = False,
        output_attentions: bool = False,
    ) -> Tuple[bool, Optional[str]]:
        """Convert a Hugging Face model to ONNX format.

        Args:
            input_model_id: Hugging Face model repository ID
            trust_remote_code: Whether to trust and execute remote code from the model
            output_attentions: Whether to output attention weights (required for some tasks)

        Returns:
            Tuple containing:
            - bool: True if conversion succeeded, False otherwise
            - Optional[str]: Error message if failed, or conversion log if succeeded
        """
        try:
            conversion_args: List[str] = []

            # Handle trust_remote_code option (requires user token for security)
            if trust_remote_code:
                if not self.config.is_using_user_token:
                    raise Exception(
                        "Trust Remote Code requires your own HuggingFace token."
                    )
                conversion_args.append("--trust_remote_code")

            # Handle output_attentions option (needed for word-level timestamps in Whisper)
            if output_attentions:
                conversion_args.append("--output_attentions")

            # Try to infer the task from model metadata and pass it to the conversion script
            # This helps the script choose the right export configuration
            try:
                info = model_info(repo_id=input_model_id, token=self.config.hf_token)
                pipeline_tag = getattr(info, "pipeline_tag", None)
                task = self._normalize_pipeline_tag(pipeline_tag)
                if task:
                    conversion_args.extend(["--task", task])
            except Exception:
                # If we can't fetch the task, continue without it
                # The conversion script will try to infer it automatically
                pass

            # Run the conversion
            result = self._run_conversion_subprocess(
                input_model_id, extra_args=conversion_args or None
            )

            # Check if conversion succeeded
            if result.returncode != 0:
                return False, result.stderr

            return True, result.stderr

        except Exception as e:
            return False, str(e)

    # ============================================================================
    # Upload Methods
    # ============================================================================

    def upload_model(self, input_model_id: str, output_model_id: str) -> Optional[str]:
        """Upload the converted ONNX model to Hugging Face Hub.

        This method:
        1. Creates the target repository (if it doesn't exist)
        2. Generates an enhanced README with merged metadata
        3. Uploads all model files to the repository
        4. Cleans up local files after upload

        Args:
            input_model_id: Original model repository ID
            output_model_id: Target repository ID for the ONNX model

        Returns:
            Optional[str]: Error message if upload failed, None if successful
        """
        model_folder_path = self.config.repo_path / "models" / input_model_id

        try:
            # Create the target repository (public by default)
            self.api.create_repo(output_model_id, exist_ok=True, private=False)

            # Generate and write the enhanced README
            readme_path = model_folder_path / "README.md"
            readme_content = self.generate_readme(input_model_id)
            readme_path.write_text(readme_content, encoding="utf-8")

            # Upload all files from the model folder
            self.api.upload_folder(
                folder_path=str(model_folder_path), repo_id=output_model_id
            )

            return None  # Success

        except Exception as e:
            return str(e)
        finally:
            # Always clean up local files, even if upload failed
            shutil.rmtree(model_folder_path, ignore_errors=True)

    # ============================================================================
    # README Generation Methods
    # ============================================================================

    def generate_readme(self, input_model_id: str) -> str:
        """Generate an enhanced README for the ONNX model.

        This method creates a README that:
        1. Merges metadata from the original model with ONNX-specific metadata
        2. Adds a description and link to the conversion space
        3. Includes usage instructions with links to Transformers.js docs
        4. Appends the original model's README content

        Args:
            input_model_id: Original model repository ID

        Returns:
            str: Complete README content in Markdown format with YAML frontmatter
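
        Example (illustrative frontmatter for 'org/model' tagged
        'text-generation'; any metadata carried over from the original
        README would appear first):
            ---
            library_name: transformers.js
            base_model:
            - org/model
            pipeline_tag: text-generation
            ---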
        """
        # Fetch pipeline tag from model metadata (if available)
        try:
            info = model_info(repo_id=input_model_id, token=self.config.hf_token)
            pipeline_tag = getattr(info, "pipeline_tag", None)
        except Exception:
            pipeline_tag = None

        # Fetch and parse the original README
        original_text = self._fetch_original_readme(input_model_id)
        original_meta, original_body = self._extract_yaml_frontmatter(original_text)
        original_body = (
            original_body or self._strip_yaml_frontmatter(original_text)
        ).strip()

        # Merge original metadata with our ONNX-specific metadata (ours take precedence)
        merged_meta = {}
        if isinstance(original_meta, dict):
            merged_meta.update(original_meta)
        merged_meta["library_name"] = "transformers.js"
        merged_meta["base_model"] = [input_model_id]
        if pipeline_tag is not None:
            merged_meta["pipeline_tag"] = pipeline_tag

        # Generate YAML frontmatter
        frontmatter_yaml = yaml.safe_dump(merged_meta, sort_keys=False).strip()
        header = f"---\n{frontmatter_yaml}\n---\n\n"

        # Build README sections
        readme_sections: List[str] = []
        readme_sections.append(header)

        # Add title
        model_name = input_model_id.split("/")[-1]
        readme_sections.append(f"# {model_name} (ONNX)\n")

        # Add description
        readme_sections.append(
            f"This is an ONNX version of [{input_model_id}](https://huggingface.co/{input_model_id}). "
            "It was automatically converted and uploaded using "
            "[this Hugging Face Space](https://huggingface.co/spaces/onnx-community/convert-to-onnx)."
        )

        # Add usage section with Transformers.js docs link
        docs_url = self._get_pipeline_docs_url(pipeline_tag)
        if docs_url:
            readme_sections.append("\n## Usage with Transformers.js\n")
            if pipeline_tag:
                readme_sections.append(
                    f"See the pipeline documentation for `{pipeline_tag}`: {docs_url}"
                )
            else:
                readme_sections.append(f"See the pipelines documentation: {docs_url}")

        # Append original README content (if available)
        if original_body:
            readme_sections.append("\n---\n")
            readme_sections.append(original_body)

        return "\n\n".join(readme_sections) + "\n"


def main():
    """Main application entry point for the Streamlit interface.

    This function:
    1. Initializes configuration and converter
    2. Displays the UI for model input and options
    3. Handles the conversion workflow
    4. Shows progress and results to the user
    """
    st.write("## Convert a Hugging Face model to ONNX")

    try:
        # Initialize configuration and converter
        config = Config.from_env()
        converter = ModelConverter(config)
        converter.setup_repository()

        # Get model ID from user
        input_model_id = st.text_input(
            "Enter the Hugging Face model ID to convert. Example: `EleutherAI/pythia-14m`"
        )

        if not input_model_id:
            return

        # Optional: User token input
        st.text_input(
            f"Optional: Your Hugging Face write token. Fill it if you want to upload the model under your account.",
            type="password",
            key="user_hf_token",
        )

        # Optional: Trust remote code toggle (requires user token)
        trust_remote_code = st.toggle("Optional: Trust Remote Code.")
        if trust_remote_code:
            st.warning(
                "This option should only be enabled for repositories you trust and in which you have read the code, as it will execute arbitrary code present in the model repository. When this option is enabled, you must use your own Hugging Face write token."
            )

        # Optional: Output attentions (for Whisper models)
        output_attentions = False
        if "whisper" in input_model_id.lower():
            output_attentions = st.toggle(
                "Whether to output attentions from the Whisper model. This is required for word-level (token) timestamps."
            )

        # Determine output repository
        # If user owns the model, allow uploading to the same repo
        if config.hf_username == input_model_id.split("/")[0]:
            same_repo = st.checkbox(
                "Upload the ONNX weights to the existing repository"
            )
        else:
            same_repo = False

        model_name = input_model_id.split("/")[-1]
        output_model_id = f"{config.hf_username}/{model_name}"

        # Add -ONNX suffix if creating a new repository
        if not same_repo:
            output_model_id += "-ONNX"

        output_model_url = f"{config.hf_base_url}/{output_model_id}"

        # Check if model already exists
        if not same_repo and converter.api.repo_exists(output_model_id):
            st.write("This model has already been converted! 🎉")
            st.link_button(f"Go to {output_model_id}", output_model_url, type="primary")
            return

        # Show where the model will be uploaded
        st.write(f"URL where the model will be converted and uploaded to:")
        st.code(output_model_url, language="plaintext")

        # Wait for user confirmation before proceeding
        if not st.button(label="Proceed", type="primary"):
            return

        # Step 1: Convert the model to ONNX
        with st.spinner("Converting model..."):
            success, stderr = converter.convert_model(
                input_model_id,
                trust_remote_code=trust_remote_code,
                output_attentions=output_attentions,
            )
            if not success:
                st.error(f"Conversion failed: {stderr}")
                return

            st.success("Conversion successful!")
            st.code(stderr)

        # Step 2: Upload the converted model to Hugging Face
        with st.spinner("Uploading model..."):
            error = converter.upload_model(input_model_id, output_model_id)
            if error:
                st.error(f"Upload failed: {error}")
                return

            st.success("Upload successful!")
            st.write("You can now go and view the model on Hugging Face!")
            st.link_button(f"Go to {output_model_id}", output_model_url, type="primary")

    except Exception as e:
        logger.exception("Application error")
        st.error(f"An error occurred: {str(e)}")


if __name__ == "__main__":
    main()