I'm training a model with a Python script.
I'm using an RTX 3080, WSL2, and Docker.
When I check whether Docker can find the GPU, it does.
The code requires PyTorch 1.2.0 and CUDA 10.0.
Is there any way I can run this code or fix the error below?
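For context, this is the kind of sanity check I run inside the container to confirm what PyTorch reports about the GPU (just a sketch; device index 0 is assumed and the output depends on the actual environment):

```python
# Quick environment check inside the Docker container
# (minimal sketch; output depends on the actual setup).
import torch

print(torch.__version__)                    # expecting 1.2.0 per the code requirements
print(torch.version.cuda)                   # CUDA version this PyTorch build was compiled against
print(torch.cuda.is_available())            # True in my case, so Docker does see the GPU
print(torch.cuda.get_device_name(0))        # should report the RTX 3080
print(torch.cuda.get_device_capability(0))  # RTX 3080 should report compute capability (8, 6)
```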
Traceback (most recent call last):
  File "oscar/run_captioning.py", line 884, in <module>
    main()
  File "oscar/run_captioning.py", line 863, in main
    global_step, avg_loss = train(args, train_dataset, val_dataset, model, tokenizer)
  File "oscar/run_captioning.py", line 434, in train
    outputs = model(**inputs)
  File "/home/apple/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "/mnt/d/oscar/oscar/modeling/modeling_bert.py", line 440, in forward
    return self.encode_forward(*args, **kwargs)
  File "/mnt/d/oscar/oscar/modeling/modeling_bert.py", line 448, in encode_forward
    encoder_history_states=encoder_history_states)
  File "/home/apple/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "/mnt/d/oscar/oscar/modeling/modeling_bert.py", line 271, in forward
    encoder_history_states=encoder_history_states)
  File "/home/apple/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "/mnt/d/oscar/oscar/modeling/modeling_bert.py", line 109, in forward
    history_state)
  File "/home/apple/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "/mnt/d/oscar/oscar/modeling/modeling_bert.py", line 140, in forward
    head_mask, history_state)
  File "/home/apple/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "/mnt/d/oscar/oscar/modeling/modeling_bert.py", line 82, in forward
    self_outputs = self.self(input_tensor, attention_mask, head_mask, history_state)
  File "/home/apple/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "/mnt/d/oscar/oscar/modeling/modeling_bert.py", line 36, in forward
    mixed_query_layer = self.query(hidden_states)
  File "/home/apple/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/apple/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/linear.py", line 87, in forward
    return F.linear(input, self.weight, self.bias)
  File "/home/apple/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/functional.py", line 1371, in linear
    output = input.matmul(weight.t())
RuntimeError: CUDA error: CUBLAS_STATUS_EXECUTION_FAILED when calling `cublasSgemm( handle, opa, opb, m, n, k, &alpha, a, lda, b, ldb, &beta, c, ldc)`
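From the traceback, the failure happens in a plain F.linear / matmul, so I'd expect even a tiny GEMM on the GPU to go through the same cuBLAS call (hypothetical repro sketch, not verified on my setup):

```python
# Minimal GPU matmul that should hit the same cublasSgemm path as F.linear
# (hypothetical repro sketch, shapes are arbitrary).
import torch

x = torch.randn(8, 16, device="cuda")   # stand-in for "hidden_states"
w = torch.randn(32, 16, device="cuda")  # stand-in for the linear-layer weight
b = torch.randn(32, device="cuda")      # stand-in for the bias

out = torch.nn.functional.linear(x, w, b)  # same call that fails in the traceback
torch.cuda.synchronize()                   # force the kernel to actually execute
print(out.shape)
```

If this also throws CUBLAS_STATUS_EXECUTION_FAILED, I assume the problem is with the PyTorch/CUDA build rather than the model code itself.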