Hello, I have an issue with img2vid: I get the error below when I try to create a video from an image. ZLUDA is active, and there is no problem with image generation. How can I solve this?
Console log:
!!! Exception during processing!!! CUDA out of memory. Tried to allocate 22.15 GiB. GPU
Traceback (most recent call last):
File "D:\AI\ComfyUI-Zluda\execution.py", line 151, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
File "D:\AI\ComfyUI-Zluda\execution.py", line 81, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
File "D:\AI\ComfyUI-Zluda\execution.py", line 74, in map_node_over_list
results.append(getattr(obj, func)(**slice_dict(input_data_all, i)))
File "D:\AI\ComfyUI-Zluda\nodes.py", line 1344, in sample
return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)
File "D:\AI\ComfyUI-Zluda\nodes.py", line 1314, in common_ksampler
samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
File "D:\AI\ComfyUI-Zluda\custom_nodes\ComfyUI-Impact-Pack\modules\impact\sample_error_enhancer.py", line 22, in informative_sample
raise e
File "D:\AI\ComfyUI-Zluda\custom_nodes\ComfyUI-Impact-Pack\modules\impact\sample_error_enhancer.py", line 9, in informative_sample
return original_sample(*args, **kwargs) # This code helps interpret error messages that occur within exceptions but does not have any impact on other operations.
File "D:\AI\ComfyUI-Zluda\comfy\sample.py", line 37, in sample
samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "D:\AI\ComfyUI-Zluda\comfy\samplers.py", line 761, in sample
return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "D:\AI\ComfyUI-Zluda\comfy\samplers.py", line 663, in sample
return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed)
File "D:\AI\ComfyUI-Zluda\comfy\samplers.py", line 650, in sample
output = self.inner_sample(noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed)
File "D:\AI\ComfyUI-Zluda\comfy\samplers.py", line 629, in inner_sample
samples = sampler.sample(self, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar)
File "D:\AI\ComfyUI-Zluda\comfy\samplers.py", line 534, in sample
samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options)
File "D:\AI\ComfyUI-Zluda\venv\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\AI\ComfyUI-Zluda\comfy\k_diffusion\sampling.py", line 137, in sample_euler
denoised = model(x, sigma_hat * s_in, **extra_args)
File "D:\AI\ComfyUI-Zluda\comfy\samplers.py", line 272, in call
out = self.inner_model(x, sigma, model_options=model_options, seed=seed)
File "D:\AI\ComfyUI-Zluda\comfy\samplers.py", line 616, in call
return self.predict_noise(*args, **kwargs)
File "D:\AI\ComfyUI-Zluda\comfy\samplers.py", line 619, in predict_noise
return sampling_function(self.inner_model, x, timestep, self.conds.get("negative", None), self.conds.get("positive", None), self.cfg, model_options=model_options, seed=seed)
File "D:\AI\ComfyUI-Zluda\comfy\samplers.py", line 258, in sampling_function
out = calc_cond_batch(model, conds, x, timestep, model_options)
File "D:\AI\ComfyUI-Zluda\comfy\samplers.py", line 218, in calc_cond_batch
output = model.apply_model(input_x, timestep_, **c).chunk(batch_chunks)
File "D:\AI\ComfyUI-Zluda\comfy\model_base.py", line 97, in apply_model
model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float()
File "D:\AI\ComfyUI-Zluda\venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\AI\ComfyUI-Zluda\venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "D:\AI\ComfyUI-Zluda\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 852, in forward
h = forward_timestep_embed(module, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator)
File "D:\AI\ComfyUI-Zluda\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 40, in forward_timestep_embed
x = layer(x, context, time_context, num_video_frames, image_only_indicator, transformer_options)
File "D:\AI\ComfyUI-Zluda\venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\AI\ComfyUI-Zluda\venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "D:\AI\ComfyUI-Zluda\comfy\ldm\modules\attention.py", line 789, in forward
x = block(
File "D:\AI\ComfyUI-Zluda\venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\AI\ComfyUI-Zluda\venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "D:\AI\ComfyUI-Zluda\comfy\ldm\modules\attention.py", line 531, in forward
n = self.attn1(n, context=context_attn1, value=value_attn1)
File "D:\AI\ComfyUI-Zluda\venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\AI\ComfyUI-Zluda\venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "D:\AI\ComfyUI-Zluda\comfy\ldm\modules\attention.py", line 425, in forward
out = optimized_attention(q, k, v, self.heads, attn_precision=self.attn_precision)
File "D:\AI\ComfyUI-Zluda\comfy\ldm\modules\attention.py", line 357, in attention_pytorch
out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 22.15 GiB. GPU
Prompt executed in 0.20 seconds
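For reference, here is a rough back-of-the-envelope check of why a single 22.15 GiB allocation shows up in scaled_dot_product_attention (the numbers below are assumptions, not from the log: the base 14-frame SVD img2vid checkpoint at 1024x576, 10 attention heads, fp16; your workflow may differ). As far as I know, ZLUDA has no flash-attention kernel, so PyTorch falls back to the math path, which materializes the full attention score matrix in one allocation:

# Back-of-envelope attention memory estimate (assumed numbers:
# 14 video frames, 10 heads, fp16, 1024x576 image -> 128x72 latent).
def attn_scores_gib(frames, heads, tokens, bytes_per_el=2):
    # Spatial self-attention batched over all frames materializes a
    # (frames * heads, tokens, tokens) score matrix.
    return frames * heads * tokens**2 * bytes_per_el / 1024**3

tokens = (1024 // 8) * (576 // 8)            # 128 * 72 = 9216 spatial tokens
print(attn_scores_gib(14, 10, tokens))       # ~22.15 GiB, matching the log
print(attn_scores_gib(14, 10, tokens // 4))  # at 512x288: ~1.4 GiB

Under these assumptions the estimate lands almost exactly on the logged 22.15 GiB, and it shows why plain image generation works while img2vid fails: the score matrix grows linearly with the frame count and quadratically with resolution, so lowering either shrinks the allocation fast.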
I have a 6800 XT. I will check fp8, or I will buy a new Nvidia GPU :s
Since this is about the amount of VRAM on your graphics card, a 7900 XTX would probably be a better choice: it is way cheaper than an RTX 4090 while having the same 24 GB of VRAM. Though I'm not 100% sure how it compares to the 3090, which IIRC has the same amount too.
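Before buying new hardware it might also be worth trying the chunked attention paths: if I remember right, ComfyUI can be started with --use-split-cross-attention (or --use-quad-cross-attention), which computes attention in slices instead of materializing the whole score matrix at once, and that is the usual workaround on cards that can't use flash attention. Lowering the resolution or the number of video frames helps for the same reason, per the estimate above.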