-
First, confirm
What happened?
Using Automatic1111. On models based on SDXL 1.0, only the first image generates; on a second attempt I get a CUDA out of memory error.
Steps to reproduce the problem
Sysinfo
Relevant console log
*** Error completing request
*** Arguments: ('task(gf0xstiwbbl84ye)', 'a portrait of brutal warrior', '(deformed, distorted, disfigured:1.3), doll, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation', [], 30, 'DPM++ 2M SDE Heun Karras', 1, 1, 7, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], <gradio.routes.Request object at 0x000001DABBFF6B90>, 0, False, '', 0.8, -1, False, -1, 0, 0, 0, False, False, {'ad_model': 'face_yolov8n.pt', 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.3, 'ad_mask_k_largest': 0, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 4, 'ad_denoising_strength': 0.4, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': False, 'ad_inpaint_width': 512, 'ad_inpaint_height': 512, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 7, 'ad_use_checkpoint': False, 'ad_checkpoint': 'Use same checkpoint', 'ad_use_vae': False, 'ad_vae': 'Use same VAE', 'ad_use_sampler': False, 'ad_sampler': 'Euler a', 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_use_clip_skip': False, 'ad_clip_skip': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'None', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': ()}, {'ad_model': 'None', 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.3, 'ad_mask_k_largest': 0, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 4, 'ad_denoising_strength': 0.4, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': False, 'ad_inpaint_width': 512, 'ad_inpaint_height': 512, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 7, 'ad_use_checkpoint': False, 'ad_checkpoint': 'Use same checkpoint', 'ad_use_vae': False, 'ad_vae': 'Use same VAE', 'ad_use_sampler': False, 'ad_sampler': 'Euler a', 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_use_clip_skip': False, 'ad_clip_skip': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'None', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': ()}, UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', save_detected_map=True), UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', save_detected_map=True), UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', save_detected_map=True), <PIL.Image.Image image mode=RGB size=568x800 at 0x1DABBFA3460>, True, '0', '0', 
'inswapper_128.onnx', 'CodeFormer', 1, True, 'None', 1, 1, False, True, 1, 0, 0, False, 0.5, True, False, 'CUDA', False, 0, 'None', False, '1:1,1:2,1:2', '0:0,0:0,0:1', '0.2,0.8,0.8', 150, 0.2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, False, False, 'positive', 'comma', 0, False, False, '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, 0, False, None, None, False, None, None, False, None, None, False, 50, True, False, 0, 'Range', 1, 'GPU', True, False, False, False, False, 0, 512, False, 512, False, False, 3, False, 3, True, 3, False, 'Horizontal', False, False, 'u2net', False, True, True, False, 0, 2.5, 'polylines_sharp', ['left-right', 'red-cyan-anaglyph'], 2, 0, '∯boost∯clipdepth∯clipdepth_far∯clipdepth_mode∯clipdepth_near∯compute_device∯do_output_depth∯gen_normalmap∯gen_rembg∯gen_simple_mesh∯gen_stereo∯model_type∯net_height∯net_size_match∯net_width∯normalmap_invert∯normalmap_post_blur∯normalmap_post_blur_kernel∯normalmap_pre_blur∯normalmap_pre_blur_kernel∯normalmap_sobel∯normalmap_sobel_kernel∯output_depth_combine∯output_depth_combine_axis∯output_depth_invert∯pre_depth_background_removal∯rembg_model∯save_background_removal_masks∯save_outputs∯simple_mesh_occlude∯simple_mesh_spherical∯stereo_balance∯stereo_divergence∯stereo_fill_algo∯stereo_modes∯stereo_offset_exponent∯stereo_separation') {}
Traceback (most recent call last):
File "D:\stable diffusion gui\stable-diffusion-webui\modules\call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "D:\stable diffusion gui\stable-diffusion-webui\modules\call_queue.py", line 36, in f
res = func(*args, **kwargs)
File "D:\stable diffusion gui\stable-diffusion-webui\modules\txt2img.py", line 55, in txt2img
processed = processing.process_images(p)
File "D:\stable diffusion gui\stable-diffusion-webui\modules\processing.py", line 732, in process_images
res = process_images_inner(p)
File "D:\stable diffusion gui\stable-diffusion-webui\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 42, in processing_process_images_hijack
return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
File "D:\stable diffusion gui\stable-diffusion-webui\modules\processing.py", line 867, in process_images_inner
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
File "D:\stable diffusion gui\stable-diffusion-webui\modules\processing.py", line 1140, in sample
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
File "D:\stable diffusion gui\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 235, in sample
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "D:\stable diffusion gui\stable-diffusion-webui\modules\sd_samplers_common.py", line 261, in launch_sampling
return func()
File "D:\stable diffusion gui\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 235, in <lambda>
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "D:\stable diffusion gui\stable-diffusion-webui\venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\stable diffusion gui\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\sampling.py", line 626, in sample_dpmpp_2m_sde
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "D:\stable diffusion gui\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\stable diffusion gui\stable-diffusion-webui\modules\sd_samplers_cfg_denoiser.py", line 169, in forward
x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
File "D:\stable diffusion gui\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\stable diffusion gui\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "D:\stable diffusion gui\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "D:\stable diffusion gui\stable-diffusion-webui\modules\sd_models_xl.py", line 37, in apply_model
return self.model(x, t, cond)
File "D:\stable diffusion gui\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1538, in _call_impl
result = forward_call(*args, **kwargs)
File "D:\stable diffusion gui\stable-diffusion-webui\modules\sd_hijack_utils.py", line 17, in <lambda>
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
File "D:\stable diffusion gui\stable-diffusion-webui\modules\sd_hijack_utils.py", line 28, in __call__
return self.__orig_func(*args, **kwargs)
File "D:\stable diffusion gui\stable-diffusion-webui\repositories\generative-models\sgm\modules\diffusionmodules\wrappers.py", line 28, in forward
return self.diffusion_model(
File "D:\stable diffusion gui\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\stable diffusion gui\stable-diffusion-webui\repositories\generative-models\sgm\modules\diffusionmodules\openaimodel.py", line 993, in forward
h = module(h, emb, context)
File "D:\stable diffusion gui\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\stable diffusion gui\stable-diffusion-webui\repositories\generative-models\sgm\modules\diffusionmodules\openaimodel.py", line 98, in forward
x = layer(x, emb)
File "D:\stable diffusion gui\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\stable diffusion gui\stable-diffusion-webui\repositories\generative-models\sgm\modules\diffusionmodules\openaimodel.py", line 317, in forward
return checkpoint(
File "D:\stable diffusion gui\stable-diffusion-webui\repositories\generative-models\sgm\modules\diffusionmodules\util.py", line 165, in checkpoint
return CheckpointFunction.apply(func, len(inputs), *args)
File "D:\stable diffusion gui\stable-diffusion-webui\venv\lib\site-packages\torch\autograd\function.py", line 506, in apply
return super().apply(*args, **kwargs) # type: ignore[misc]
File "D:\stable diffusion gui\stable-diffusion-webui\repositories\generative-models\sgm\modules\diffusionmodules\util.py", line 182, in forward
output_tensors = ctx.run_function(*ctx.input_tensors)
File "D:\stable diffusion gui\stable-diffusion-webui\repositories\generative-models\sgm\modules\diffusionmodules\openaimodel.py", line 346, in _forward
h = self.out_layers(h)
File "D:\stable diffusion gui\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\stable diffusion gui\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\container.py", line 217, in forward
input = module(input)
File "D:\stable diffusion gui\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "D:\stable diffusion gui\stable-diffusion-webui\repositories\generative-models\sgm\modules\diffusionmodules\util.py", line 275, in forward
return super().forward(x.float()).type(x.dtype)
File "D:\stable diffusion gui\stable-diffusion-webui\extensions-builtin\Lora\networks.py", line 459, in network_GroupNorm_forward
return originals.GroupNorm_forward(self, input)
File "D:\stable diffusion gui\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\normalization.py", line 273, in forward
return F.group_norm(
File "D:\stable diffusion gui\stable-diffusion-webui\venv\lib\site-packages\torch\nn\functional.py", line 2530, in group_norm
return torch.group_norm(input, num_groups, weight, bias, eps, torch.backends.cudnn.enabled)
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 10.00 MiB (GPU 0; 8.00 GiB total capacity; 5.45 GiB already allocated; 0 bytes free; 5.64 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
Additional information
No response
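For reference, the error text above itself suggests setting max_split_size_mb to reduce allocator fragmentation. A minimal sketch of one way to do that, assuming the stock webui-user.bat launcher on Windows (the 128 MB split size is an illustrative value, not a verified fix):

@echo off
rem webui-user.bat (sketch): export the allocator setting before webui starts
set PYTHON=
set GIT=
set VENV_DIR=
rem Cap allocator block size to limit fragmentation, per the OOM message above
set PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:128
set COMMANDLINE_ARGS=
call webui.bat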
-
ReActor has nothing to do with "CUDA out of memory"; it doesn't use that much VRAM (500-550 MB). All I can suggest is to try a more powerful GPU or to use optimizations to reduce VRAM usage:
Set "--lowvram" instead of "--medvram".
Also try "--opt-sdp-attention --upcast-sampling" instead of "--xformers".
You can also set "Execution Provider" to CPU in ReActor's Settings tab; ReActor will work a little slower, but it won't use VRAM.
-
Actually, I can run this extension only once with SDXL models; after that, CUDA runs out of memory. If I use Roop with exactly the same configuration (software and hardware), there is no problem with VRAM. This extension uses more VRAM than its competitor Roop. |
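One way to make a comparison like this concrete is to log VRAM while each extension runs. A minimal sketch using nvidia-smi (assuming an NVIDIA GPU with the tool on PATH; the log file name is arbitrary):

rem Sample GPU memory once per second while generating with each extension,
rem then compare the peaks in the two logs
nvidia-smi --query-gpu=timestamp,memory.used,memory.total --format=csv -l 1 > vram_reactor.csv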