Spaces: Running on Zero
	Update app.py
app.py CHANGED
@@ -19,19 +19,18 @@ os.makedirs('./temp', exist_ok=True)
 
 print('\n\n\n')
 print('Loading model...')
-
-
-
-
-
-
+cpu_pipe = transformers.pipeline(
+    'text-generation',
+    model='dx2102/llama-midi',
+    torch_dtype='float32',
+    device='cpu',
+)
 gpu_pipe = transformers.pipeline(
     'text-generation',
     model='dx2102/llama-midi',
     torch_dtype='bfloat16',
     device='cuda:0',
 )
-cpu_pipe = gpu_pipe
 # print devices
 print(f"{gpu_pipe.device = }, {gpu_pipe.model.device = }")
 print(f"{cpu_pipe.device = }, {cpu_pipe.model.device = }")
@@ -58,6 +57,8 @@ example_prefix = '''pitch duration wait velocity instrument
 69 1970 0 20 0
 48 330 350 20 0
 '''
+print('cpu:', cpu_pipe(example_prefix, max_new_tokens=10)[0]['generated_text'])
+print('gpu:', gpu_pipe(example_prefix, max_new_tokens=10)[0]['generated_text'])
 
 
 
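
For context, the diff replaces the temporary alias cpu_pipe = gpu_pipe with a real CPU pipeline (float32 on CPU, alongside the bfloat16 CUDA one) and smoke-tests both with a 10-token generation. Below is a minimal sketch of that setup plus a hypothetical generate() helper that is not part of app.py; the torch.cuda.is_available() guard is likewise an assumption added for the sketch, showing one way the CPU pipeline can act as a fallback when no GPU is attached.

import torch
import transformers

# Same two pipelines as in the diff above.
cpu_pipe = transformers.pipeline(
    'text-generation',
    model='dx2102/llama-midi',
    torch_dtype='float32',   # full precision; bfloat16 is slow on many CPUs
    device='cpu',
)
gpu_pipe = None
if torch.cuda.is_available():   # assumption: only build the CUDA pipeline when a GPU is present
    gpu_pipe = transformers.pipeline(
        'text-generation',
        model='dx2102/llama-midi',
        torch_dtype='bfloat16',
        device='cuda:0',
    )

def generate(prefix: str, max_new_tokens: int = 10) -> str:
    # Hypothetical helper: prefer the GPU pipeline, fall back to CPU.
    pipe = gpu_pipe if gpu_pipe is not None else cpu_pipe
    # For 'text-generation', generated_text is the prompt followed by the continuation.
    return pipe(prefix, max_new_tokens=max_new_tokens)[0]['generated_text']

The app itself constructs both pipelines unconditionally, exactly as the diff shows.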
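
The example_prefix fed to the new smoke tests is the model's plain-text note format: a header line naming five integer columns (pitch duration wait velocity instrument) followed by one note per line. Here is a small sketch of parsing that output; parse_notes is hypothetical and not part of app.py, and the diff does not specify the time units of duration/wait.

def parse_notes(text: str) -> list[tuple[int, int, int, int, int]]:
    # Split each output row into (pitch, duration, wait, velocity, instrument),
    # skipping the header and any trailing partial line the model may emit.
    notes = []
    for line in text.splitlines():
        fields = line.split()
        if len(fields) == 5 and all(f.lstrip('-').isdigit() for f in fields):
            notes.append(tuple(int(f) for f in fields))
    return notes

print(parse_notes('pitch duration wait velocity instrument\n69 1970 0 20 0\n48 330 350 20 0\n'))
# -> [(69, 1970, 0, 20, 0), (48, 330, 350, 20, 0)]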