	Update src/app.py
src/app.py CHANGED (+11 -1)
```diff
@@ -19,7 +19,17 @@ def get_results(model_name: str, library: str, options: list, access_token: str)
 with gr.Blocks() as demo:
     with gr.Column():
         gr.Markdown(
-            "
+            """<img src="https://huggingface.co/spaces/hf-accelerate/model-memory-usage/resolve/main/measure_model_size.png" style="float: left;" width="250" height="250"><h1>🤗 Model Memory Calculator</h1>
+    This tool will help you calculate how much vRAM is needed to train and perform big model inference
+    on a model hosted on the 🤗 Hugging Face Hub. The minimum recommended vRAM needed for a model
+    is denoted as the size of the "largest layer", and training of a model is roughly 4x its size (for Adam).
+    These calculations are accurate within a few percent at most, such as `bert-base-cased` being 413.68 MB and the calculator estimating 413.18 MB.
+    When performing inference, expect to add up to an additional 20% to this as found by [EleutherAI](https://blog.eleuther.ai/transformer-math/).
+    More tests will be performed in the future to get a more accurate benchmark for each model.
+    Currently this tool supports all hosted models that use `transformers` and `timm`.
+    To use this tool, pass in the URL or model name of the model you want to calculate the memory usage for,
+    select which framework it originates from ("auto" will try to detect it from the model metadata), and
+    what precisions you want to use."""
         )
         out_text = gr.Markdown()
         out = gr.DataFrame(
```
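For readers who don't know Gradio, the hunk above edits a `Blocks` layout: a `Column` holding the intro `Markdown`, a second `Markdown` used for the result summary, and a `DataFrame` for the memory table. A stripped-down sketch of that structure (an assumed skeleton, not the Space's full `src/app.py`):

```python
# Assumed skeleton for illustration -- the real src/app.py wires these
# components to get_results() and takes model name, library, and precision inputs.
import gradio as gr

with gr.Blocks() as demo:
    with gr.Column():
        gr.Markdown("<h1>🤗 Model Memory Calculator</h1> ...intro text from the diff...")
        out_text = gr.Markdown()  # summary text filled in after a calculation
        out = gr.DataFrame()      # per-precision memory breakdown table

if __name__ == "__main__":
    demo.launch()
```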
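The new blurb states three rules of thumb: the minimum recommended vRAM is the size of the largest layer, inference can add up to ~20% on top of the weights, and Adam training needs roughly 4x the model size. A minimal sketch of that arithmetic, using the 413.68 MB `bert-base-cased` size quoted above; the largest-layer figure below is an illustrative placeholder, not a value from the Space:

```python
# Minimal sketch of the rules of thumb quoted above -- not the Space's code.
def rules_of_thumb(model_mb: float, largest_layer_mb: float) -> dict:
    return {
        "minimum_vram_mb": largest_layer_mb,      # largest single layer
        "inference_vram_mb": model_mb * 1.20,     # up to +20%, per EleutherAI
        "training_vram_mb_adam": model_mb * 4.0,  # roughly 4x for Adam
    }

# bert-base-cased at the 413.68 MB quoted above; 85.0 MB is a placeholder
# for its largest layer (the word-embedding matrix), not a measured value.
print(rules_of_thumb(model_mb=413.68, largest_layer_mb=85.0))
# -> minimum 85.0 MB, inference ~496.4 MB, Adam training ~1654.7 MB
```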
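The blurb also lets you pick which precisions to calculate for. Weight memory scales linearly with bytes per parameter, so each halving of precision halves the footprint. A sketch assuming a float32/float16/int8/int4 menu (the menu itself is an assumption; the ~108.44M parameter count is used because it reproduces the 413.68 MB float32 figure above):

```python
# Bytes per parameter for each selectable precision; this menu is an
# assumption for illustration, not read from the Space's code.
BYTES_PER_PARAM = {"float32": 4.0, "float16": 2.0, "int8": 1.0, "int4": 0.5}

def weights_mb(num_params: int, dtype: str) -> float:
    """Raw weight memory in MB (1 MB = 1024**2 bytes) at a given precision."""
    return num_params * BYTES_PER_PARAM[dtype] / 1024**2

# ~108.44M parameters reproduces the 413.68 MB float32 figure quoted above.
for dtype in BYTES_PER_PARAM:
    print(f"{dtype:>8}: {weights_mb(108_440_000, dtype):7.2f} MB")
# float32: 413.67 MB | float16: 206.83 MB | int8: 103.42 MB | int4: 51.71 MB
```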