Spaces · Running on Zero
bo.l committed on
Commit 79465f9 · 1 Parent(s): f1138b7

init model

app.py CHANGED
@@ -7,7 +7,6 @@ from PIL import Image
 from kontext.pipeline_flux_kontext import FluxKontextPipeline
 from kontext.scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler
 from diffusers import FluxTransformer2DModel
-import torch
 from huggingface_hub import hf_hub_download
 from safetensors.torch import load_file
 
@@ -57,7 +56,7 @@ flux_pipeline.text_encoder_2.to(device).to(torch.bfloat16)
 
 # Replace the transformer weights
 ckpt_path = hf_hub_download("NoobDoge/Multi_Ref_Model", "full_ema_model.safetensors")
-flux_pipeline.transformer.from_single_file(ckpt_path, torch_dtype=torch.bfloat16)
+flux_pipeline.transformer = FluxTransformer2DModel.from_single_file(ckpt_path, torch_dtype=torch.bfloat16)
 flux_pipeline.transformer.to(device).to(torch.bfloat16)
 
 # ---------------------------
@@ -100,9 +99,6 @@ def infer(
     # Resize reference images by bucket
     raw_images = resize_by_bucket(refs, resolution=MAX_IMAGE_SIZE)
 
-    if len(raw_images) == 2:
-        raw_images = [[raw_images[0]],[raw_images[1]]]
-
     # Inference
     with torch.no_grad():
         out = flux_pipeline(
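The substantive fix in app.py is the transformer weight swap: diffusers' from_single_file is a classmethod that constructs and returns a new model rather than loading weights into the instance it is called on, so the old call on flux_pipeline.transformer discarded the loaded checkpoint; the new line assigns the result back onto the pipeline. The removed import torch at old line 10 appears to be a duplicate, since torch.bfloat16 and torch.no_grad are still used in unchanged lines. A minimal sketch of the corrected pattern (repo id, filename, and dtype are taken from the diff; flux_pipeline and device are names defined earlier in app.py and appear only in comments here):

    import torch
    from diffusers import FluxTransformer2DModel
    from huggingface_hub import hf_hub_download

    # Download the EMA checkpoint referenced in the diff.
    ckpt_path = hf_hub_download("NoobDoge/Multi_Ref_Model", "full_ema_model.safetensors")

    # from_single_file builds and returns a *new* FluxTransformer2DModel from the
    # checkpoint; it does not mutate an existing instance, so the returned model
    # must be kept and assigned back onto the pipeline.
    transformer = FluxTransformer2DModel.from_single_file(ckpt_path, torch_dtype=torch.bfloat16)

    # In app.py the returned model is then attached to the pipeline and moved to the GPU:
    #   flux_pipeline.transformer = transformer
    #   flux_pipeline.transformer.to(device).to(torch.bfloat16)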
    	
kontext/__pycache__/pipeline_flux_kontext.cpython-311.pyc CHANGED
Binary files a/kontext/__pycache__/pipeline_flux_kontext.cpython-311.pyc and b/kontext/__pycache__/pipeline_flux_kontext.cpython-311.pyc differ

kontext/__pycache__/scheduling_flow_match_euler_discrete.cpython-311.pyc CHANGED
Binary files a/kontext/__pycache__/scheduling_flow_match_euler_discrete.cpython-311.pyc and b/kontext/__pycache__/scheduling_flow_match_euler_discrete.cpython-311.pyc differ

test_model.ipynb ADDED
The diff for this file is too large to render. See raw diff
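The other functional change in this commit's app.py is inside infer(): the branch that wrapped exactly two reference images into nested single-image lists is dropped, so the flat list returned by resize_by_bucket is now passed to the pipeline unchanged. A hypothetical illustration of the shape difference (blank PIL images stand in for real references; the flux_pipeline call itself is not shown):

    from PIL import Image

    # Two placeholder reference images standing in for the bucket-resized inputs.
    refs = [Image.new("RGB", (512, 512)), Image.new("RGB", (512, 512))]

    # Before this commit, two references were rewrapped into nested one-image lists:
    #   raw_images = [[refs[0]], [refs[1]]]   # [[<ref 1>], [<ref 2>]]
    # After this commit, the flat list is used directly:
    raw_images = refs                         # [<ref 1>, <ref 2>]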