From e1dff8c5a9829c4adb026e78f016b22535bffdd1 Mon Sep 17 00:00:00 2001 From: kabachuha Date: Fri, 28 Apr 2023 13:55:36 +0300 Subject: [PATCH] autodisable xformers if using torch2 in readme example It's really strange to ask the users to perform a code editing action if it can be accomplished with a simple check --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 86087fe..866e7a8 100644 --- a/README.md +++ b/README.md @@ -75,9 +75,6 @@ And we can now run the model locally. By default `diffusers` makes use of [model cpu offloading](https://huggingface.co/docs/diffusers/optimization/fp16#model-offloading-for-fast-inference-and-memory-savings) to run the whole IF pipeline with as little as 14 GB of VRAM. -If you are using `torch>=2.0.0`, make sure to **delete all** `enable_xformers_memory_efficient_attention()` -functions. - ```py from diffusers import DiffusionPipeline from diffusers.utils import pt_to_pil @@ -85,22 +82,25 @@ import torch # stage 1 stage_1 = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) -stage_1.enable_xformers_memory_efficient_attention() # remove line if torch.__version__ >= 2.0.0 stage_1.enable_model_cpu_offload() # stage 2 stage_2 = DiffusionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16 ) -stage_2.enable_xformers_memory_efficient_attention() # remove line if torch.__version__ >= 2.0.0 stage_2.enable_model_cpu_offload() # stage 3 safety_modules = {"feature_extractor": stage_1.feature_extractor, "safety_checker": stage_1.safety_checker, "watermarker": stage_1.watermarker} stage_3 = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-x4-upscaler", **safety_modules, torch_dtype=torch.float16) -stage_3.enable_xformers_memory_efficient_attention() # remove line if torch.__version__ >= 2.0.0 stage_3.enable_model_cpu_offload() +# xformers memory 
efficient attention shouldn't be used with PyTorch 2 +if int(torch.__version__.split('.')[0]) < 2: + stage_1.enable_xformers_memory_efficient_attention() + stage_2.enable_xformers_memory_efficient_attention() + stage_3.enable_xformers_memory_efficient_attention() + prompt = 'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"' # text embeds