Datasets:

ArXiv:
diffusers-benchmarking-bot committed on
Commit
acbe300
·
verified ·
1 Parent(s): 13f7e4c

Upload folder using huggingface_hub

Browse files
main/matryoshka.py CHANGED
@@ -1475,11 +1475,8 @@ class MatryoshkaFusedAttnProcessor2_0:
1475
  fused projection layers. For self-attention modules, all projection matrices (i.e., query, key, value) are fused.
1476
  For cross-attention modules, key and value projection matrices are fused.
1477
 
1478
- <Tip warning={true}>
1479
-
1480
- This API is currently 🧪 experimental in nature and can change in future.
1481
-
1482
- </Tip>
1483
  """
1484
 
1485
  def __init__(self):
@@ -2696,11 +2693,8 @@ class MatryoshkaUNet2DConditionModel(
2696
  Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
2697
  are fused. For cross-attention modules, key and value projection matrices are fused.
2698
 
2699
- <Tip warning={true}>
2700
-
2701
- This API is 🧪 experimental.
2702
-
2703
- </Tip>
2704
  """
2705
  self.original_attn_processors = None
2706
 
@@ -2719,11 +2713,8 @@ class MatryoshkaUNet2DConditionModel(
2719
  def unfuse_qkv_projections(self):
2720
  """Disables the fused QKV projection if enabled.
2721
 
2722
- <Tip warning={true}>
2723
-
2724
- This API is 🧪 experimental.
2725
-
2726
- </Tip>
2727
 
2728
  """
2729
  if self.original_attn_processors is not None:
 
1475
  fused projection layers. For self-attention modules, all projection matrices (i.e., query, key, value) are fused.
1476
  For cross-attention modules, key and value projection matrices are fused.
1477
 
1478
+ > [!WARNING]
1479
+ > This API is currently 🧪 experimental in nature and can change in future.
 
 
 
1480
  """
1481
 
1482
  def __init__(self):
 
2693
  Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
2694
  are fused. For cross-attention modules, key and value projection matrices are fused.
2695
 
2696
+ > [!WARNING]
2697
+ > This API is 🧪 experimental.
 
 
 
2698
  """
2699
  self.original_attn_processors = None
2700
 
 
2713
  def unfuse_qkv_projections(self):
2714
  """Disables the fused QKV projection if enabled.
2715
 
2716
+ > [!WARNING]
2717
+ > This API is 🧪 experimental.
 
 
 
2718
 
2719
  """
2720
  if self.original_attn_processors is not None:
main/pipeline_stable_diffusion_boxdiff.py CHANGED
@@ -948,11 +948,8 @@ class StableDiffusionBoxDiffPipeline(
948
  Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
949
  key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
950
 
951
- <Tip warning={true}>
952
-
953
- This API is 🧪 experimental.
954
-
955
- </Tip>
956
 
957
  Args:
958
  unet (`bool`, defaults to `True`): To apply fusion on the UNet.
@@ -978,11 +975,8 @@ class StableDiffusionBoxDiffPipeline(
978
  def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True):
979
  """Disable QKV projection fusion if enabled.
980
 
981
- <Tip warning={true}>
982
-
983
- This API is 🧪 experimental.
984
-
985
- </Tip>
986
 
987
  Args:
988
  unet (`bool`, defaults to `True`): To apply fusion on the UNet.
 
948
  Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
949
  key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
950
 
951
+ > [!WARNING]
952
+ > This API is 🧪 experimental.
 
 
 
953
 
954
  Args:
955
  unet (`bool`, defaults to `True`): To apply fusion on the UNet.
 
975
  def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True):
976
  """Disable QKV projection fusion if enabled.
977
 
978
+ > [!WARNING]
979
+ > This API is 🧪 experimental.
 
 
 
980
 
981
  Args:
982
  unet (`bool`, defaults to `True`): To apply fusion on the UNet.
main/pipeline_stable_diffusion_pag.py CHANGED
@@ -940,9 +940,8 @@ class StableDiffusionPAGPipeline(
940
  """
941
  Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
942
  key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
943
- <Tip warning={true}>
944
- This API is 🧪 experimental.
945
- </Tip>
946
  Args:
947
  unet (`bool`, defaults to `True`): To apply fusion on the UNet.
948
  vae (`bool`, defaults to `True`): To apply fusion on the VAE.
@@ -966,9 +965,8 @@ class StableDiffusionPAGPipeline(
966
  # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.unfuse_qkv_projections
967
  def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True):
968
  """Disable QKV projection fusion if enabled.
969
- <Tip warning={true}>
970
- This API is 🧪 experimental.
971
- </Tip>
972
  Args:
973
  unet (`bool`, defaults to `True`): To apply fusion on the UNet.
974
  vae (`bool`, defaults to `True`): To apply fusion on the VAE.
 
940
  """
941
  Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
942
  key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
943
+ > [!WARNING]
944
+ > This API is 🧪 experimental.
 
945
  Args:
946
  unet (`bool`, defaults to `True`): To apply fusion on the UNet.
947
  vae (`bool`, defaults to `True`): To apply fusion on the VAE.
 
965
  # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.unfuse_qkv_projections
966
  def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True):
967
  """Disable QKV projection fusion if enabled.
968
+ > [!WARNING]
969
+ > This API is 🧪 experimental.
 
970
  Args:
971
  unet (`bool`, defaults to `True`): To apply fusion on the UNet.
972
  vae (`bool`, defaults to `True`): To apply fusion on the VAE.