import multiprocessing
from concurrent.futures import ThreadPoolExecutor


def func(data):
    """Print a single work item."""
    print(data)


def worker(array):
    """Run func over every element of *array* on one worker thread.

    The context manager guarantees the pool is shut down and all
    submitted tasks have completed before the function returns —
    the original created the pool without ever shutting it down.
    """
    with ThreadPoolExecutor(1) as pool:
        for a in array:
            pool.submit(func, a)


if __name__ == '__main__':
    processes = []
    data = [i for i in range(300)]
    # Fan out three processes, each handling a 100-item slice of data.
    for i in range(3):
        p = multiprocessing.Process(
            target=worker,
            args=(data[100 * i:100 * (i + 1)],),
        )
        processes.append(p)
        p.start()
    for p in processes:
        p.join()

# Reported error: TypeError: cannot serialize '_io.BufferedReader' object
时间: 2024-03-15 15:46:46 浏览: 63
go-pool:更好的通用池(sync.Pool)
这个错误通常是因为传递给`multiprocessing.Process`的参数(或进程启动时继承的状态)中包含无法序列化的对象,例如打开的文件句柄`_io.BufferedReader`。需要注意的是,整数列表本身是可以被pickle序列化的,所以如果出现此错误,应首先检查参数中是否混入了文件对象等不可序列化的内容。一种排查和规避的办法是显式地使用`pickle`模块进行序列化和反序列化:将`data`对象先序列化,再作为参数传递给`worker`函数,然后在`worker`函数中反序列化它——如果`pickle.dumps`这一步就报错,即可直接定位到不可序列化的对象。以下是修改后的代码示例:
```
import multiprocessing
import pickle
from concurrent.futures import ThreadPoolExecutor
def func(data):
    """Print the received work item to stdout."""
    print(data)
def worker(data):
    """Deserialize *data* (a pickle byte string) and print each element.

    All items are dispatched to a single-threaded executor; the context
    manager waits for every submitted task before returning.
    """
    items = pickle.loads(data)
    with ThreadPoolExecutor(1) as executor:
        for item in items:
            executor.submit(func, item)
if __name__ == '__main__':
    data = list(range(300))
    # Serialize once up front; each child process receives the same payload.
    serialized_data = pickle.dumps(data)
    procs = []
    for _ in range(3):
        proc = multiprocessing.Process(target=worker, args=(serialized_data,))
        procs.append(proc)
        proc.start()
    # Wait for every child to finish before exiting.
    for proc in procs:
        proc.join()
```
在这个修改后的代码中,我们首先使用`pickle`模块将`data`对象序列化为一个字节串`serialized_data`,然后将`serialized_data`作为参数传递给`worker`函数。在`worker`函数中,我们使用`pickle`模块的`loads`函数将字节串反序列化为原始的`array`列表。这样,我们就可以避免将`_io.BufferedReader`对象传递给`multiprocessing.Process`,从而避免了这个错误。
阅读全文