We ran demo_vid2seq.py.
At line 41 of vid2seq.py we found the following:
self.t5_model.resize_token_embeddings(len(tokenizer) - num_bins) # remove the weights of the 28 tokens that are not used (32128 vs 32100 in the tokenizer)
self.t5_model.resize_token_embeddings(len(tokenizer)) # add time tokens
These two lines of code looked the same to us, so we commented one of them out, and the following error occurred (a standalone sketch of the two resize calls is included after the traceback below):
File "demo_vid2seq.py", line 170, in
temperature=1)
File "/root/miniconda3/envs/VidChapters/lib/python3.7/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/mnt/workspace/ai-story/daikun.zhang/VidChapters/model/vid2seq.py", line 163, in generate
num_return_sequences=num_captions,
File "/root/miniconda3/envs/VidChapters/lib/python3.7/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/root/miniconda3/envs/VidChapters/lib/python3.7/site-packages/transformers/generation/utils.py", line 1534, in generate
**model_kwargs,
File "/root/miniconda3/envs/VidChapters/lib/python3.7/site-packages/transformers/generation/utils.py", line 2814, in beam_search
output_hidden_states=output_hidden_states,
File "/root/miniconda3/envs/VidChapters/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "/mnt/workspace/ai-story/daikun.zhang/VidChapters/model/modeling_t5.py", line 1698, in forward
return_dict=return_dict,
File "/root/miniconda3/envs/VidChapters/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "/mnt/workspace/ai-story/daikun.zhang/VidChapters/model/modeling_t5.py", line 1082, in forward
output_attentions=output_attentions,
File "/root/miniconda3/envs/VidChapters/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "/mnt/workspace/ai-story/daikun.zhang/VidChapters/model/modeling_t5.py", line 710, in forward
output_attentions=output_attentions,
File "/root/miniconda3/envs/VidChapters/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "/mnt/workspace/ai-story/daikun.zhang/VidChapters/model/modeling_t5.py", line 616, in forward
output_attentions=output_attentions,
File "/root/miniconda3/envs/VidChapters/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "/mnt/workspace/ai-story/daikun.zhang/VidChapters/model/modeling_t5.py", line 528, in forward
query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head)
File "/root/miniconda3/envs/VidChapters/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "/root/miniconda3/envs/VidChapters/lib/python3.7/site-packages/torch/nn/modules/linear.py", line 114, in forward
return F.linear(input, self.weight, self.bias)
RuntimeError: CUDA error: CUBLAS_STATUS_EXECUTION_FAILED when calling cublasSgemm( handle, opa, opb, m, n, k, &alpha, a, lda, b, ldb, &beta, c, ldc)
How should we resolve this?
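For reference, here is a minimal standalone sketch of what the two resize_token_embeddings calls above do when run in isolation. The "t5-base" checkpoint, num_bins = 100, and the "<time=i>" token format are assumptions made for this illustration, not values read from the VidChapters code:

```python
# Hedged sketch: illustrates the two consecutive resize_token_embeddings calls.
# Assumptions (not taken from the repo): "t5-base", num_bins = 100, "<time=i>" token names.
from transformers import T5ForConditionalGeneration, T5Tokenizer

num_bins = 100  # assumed number of time tokens

tokenizer = T5Tokenizer.from_pretrained("t5-base")
tokenizer.add_tokens([f"<time={i}>" for i in range(num_bins)])  # add time tokens to the tokenizer

model = T5ForConditionalGeneration.from_pretrained("t5-base")
print(model.get_input_embeddings().weight.shape[0])  # 32128: T5's padded embedding table

# First call: shrink to the tokenizer's original size, dropping the 28 unused
# embedding rows that pad T5's vocabulary (32128 -> 32100).
model.resize_token_embeddings(len(tokenizer) - num_bins)
print(model.get_input_embeddings().weight.shape[0])  # 32100

# Second call: grow again to make room for the newly added time tokens
# (32100 -> 32100 + num_bins).
model.resize_token_embeddings(len(tokenizer))
print(model.get_input_embeddings().weight.shape[0])  # 32200
```

Under these assumptions the two calls resize the embedding table to different sizes (32100 vs. 32200), which is why we would like to understand whether commenting one of them out is related to the error above.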