How to use multiprocessing when the input is a torch.tensor with gradient?
import torch.multiprocessing as mp
import torch
import time

class test():
    def __init__(self, X, Y):
        self.X = X  # .share_memory_()
        self.Y = Y
        self.n = X.shape[0]
        self.result()

    def result(self):
        self.result = torch.zeros([self.n])
        pool = mp.Pool()
        for i in range(self.n):
            pool.apply_async(self.f, (i,))
        print('start')
        pool.close()
        pool.join()
        print('result', self.result)

    def f(self, i):
        time.sleep(2)
        xi = self.X[i]
        yi = self.Y[i]
        self.result[i] = xi + yi
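Regarding the commented-out .share_memory_() in __init__: as far as I know, a write made inside a worker process is visible to the parent only if the tensor's storage lives in shared memory. A minimal sketch of explicit sharing with a single Process (a toy example, separate from the class above):

import torch
import torch.multiprocessing as mp

def write(t, i, value):
    # runs in the child process; the parent sees this write because
    # t's underlying storage sits in shared memory
    t[i] = value

if __name__ == '__main__':
    t = torch.zeros(3)
    t.share_memory_()  # move the storage into shared memory before handing it out
    p = mp.Process(target=write, args=(t, 0, 42.0))
    p.start()
    p.join()
    print(t)  # tensor([42., 0., 0.])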
If X does not require a gradient, it works correctly (with the class above saved as temp.py):
if __name__ == '__main__':
    from temp import *
    import torch

    X = torch.tensor([1.0, 2.0, 3.0])
    Y = X + 1
    A = test(X, Y)
It will print [3.0,5.0,7.0].
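My guess as to why this works at all: torch.multiprocessing installs its own picklers, so a CPU tensor passed to a Pool worker seems to get its storage moved into shared memory, which would explain why the in-place writes done in f show up in the parent. A rough check of that assumption (touch is just a hypothetical helper):

import torch
import torch.multiprocessing as mp

def touch(t):
    # report whether the copy the worker received is backed by shared memory
    return t.is_shared()

if __name__ == '__main__':
    t = torch.zeros(3)
    print(t.is_shared())                 # False: nothing shared yet
    with mp.Pool(1) as pool:
        print(pool.apply(touch, (t,)))   # True in the worker
    print(t.is_shared())                 # True in the parent too, after crossing the queue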
But if X requires a gradient:
if __name__ == '__main__':
    from temp import *
    import torch

    X = torch.tensor([1.0, 2.0, 3.0]).requires_grad_()
    Y = X + 1
    A = test(X, Y)
Then it prints [0, 0, 0].
When X requires a gradient, or X is on 'cuda', it prints [0, 0, 0], which is not correct.
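One thing I noticed while debugging (a sketch, with a standalone f standing in for the method above): pool.apply_async returns an AsyncResult, and an exception raised while serializing or running the task only resurfaces when .get() is called on the handle, so keeping the handles at least makes the failure visible instead of just printing zeros:

import time
import torch
import torch.multiprocessing as mp

def f(result, X, Y, i):
    time.sleep(2)
    result[i] = X[i] + Y[i]

if __name__ == '__main__':
    X = torch.tensor([1.0, 2.0, 3.0]).requires_grad_()
    Y = X + 1
    result = torch.zeros(X.shape[0])
    pool = mp.Pool()
    # keep the AsyncResult handles instead of discarding them
    handles = [pool.apply_async(f, (result, X, Y, i)) for i in range(X.shape[0])]
    pool.close()
    pool.join()
    for h in handles:
        h.get()   # re-raises whatever went wrong in (or on the way to) the worker
    print('result', result)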
Sources
This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.
Source: Stack Overflow