Traceback (most recent call last): File "D:\Documation\Coding\py_test_grow\main.py", line 97, in <module> disjoint_set = parallel_connected_components(image, num_processes) File "D:\Documation\Coding\py_test_grow\main.py", line 57, in parallel_connected_components result.get() File "D:\Download\python38\lib\multiprocessing\pool.py", line 771, in get raise self._value File "D:\Download\python38\lib\multiprocessing\pool.py", line 537, in _handle_tasks put(task) File "D:\Download\python38\lib\multiprocessing\connection.py", line 206, in send self._send_bytes(_ForkingPickler.dumps(obj)) File "D:\Download\python38\lib\multiprocessing\reduction.py", line 51, in dumps cls(buf, protocol).dump(obj) File "D:\Download\python38\lib\multiprocessing\synchronize.py", line 101, in __getstate__ context.assert_spawning(self) File "D:\Download\python38\lib\multiprocessing\context.py", line 359, in assert_spawning raise RuntimeError( RuntimeError: Lock objects should only be shared between processes through inheritance Process finished with exit code 1 这次又是这样的报错
时间: 2023-08-04 20:06:27 浏览: 215
这个报错是因为在使用 `multiprocessing.Pool` 进行并行处理时,锁对象 `lock` 不能直接传递给子进程。根据报错信息,锁对象只能通过继承的方式在子进程之间共享。
为了解决这个问题,你可以使用 `multiprocessing.Manager` 模块中的 `Lock` 对象来创建一个可在多个进程之间共享的锁。以下是修改后的示例代码:
```python
import numpy as np
import multiprocessing as mp
def find_connected_components(image, start_row, end_row, disjoint_set, lock):
    """Register the foreground pixels of rows [start_row, end_row) in disjoint_set.

    Bug fixed: the original only called make_set() on *isolated* pixels, so
    union() hit a KeyError (via find()) for any pixel that had a neighbour,
    and it also tried to union with down/right neighbours that had not been
    scanned yet. Now every foreground pixel is registered first, and unions
    are performed only against the up/left neighbours, which in raster order
    are guaranteed to already be registered within this chunk.

    NOTE(review): when invoked through multiprocessing, ``disjoint_set`` is a
    pickled per-process copy unless it is a Manager proxy — mutations made
    here will not propagate back to the parent process. Confirm with caller.

    Args:
        image: 2-D binary array; pixels equal to 1 are foreground.
        start_row, end_row: half-open row range this call is responsible for.
        disjoint_set: object exposing make_set((i, j)) and union(a, b).
        lock: context manager guarding disjoint_set mutations.
    """
    rows, cols = image.shape
    for i in range(start_row, end_row):
        for j in range(cols):
            if image[i, j] != 1:
                continue
            with lock:
                disjoint_set.make_set((i, j))
                # Up neighbour: only if it lies inside this chunk, so it has
                # already been registered on a previous row pass.
                if i - 1 >= start_row and image[i - 1, j] == 1:
                    disjoint_set.union((i, j), (i - 1, j))
                # Left neighbour: scanned earlier in this same row.
                if j > 0 and image[i, j - 1] == 1:
                    disjoint_set.union((i, j), (i, j - 1))
def _scan_chunk(image, start_row, end_row):
    """Worker: scan rows [start_row, end_row) of a binary image.

    Returns (pixels, edges), where ``pixels`` lists every foreground
    coordinate in the chunk and ``edges`` joins each such pixel to its
    4-connected foreground neighbours — including neighbours in adjacent
    chunks, so cross-chunk components merge correctly in the parent.
    Both lists are plain tuples, hence cheap to pickle back.
    """
    rows, cols = image.shape
    pixels = []
    edges = []
    for i in range(start_row, end_row):
        for j in range(cols):
            if image[i, j] != 1:
                continue
            pixels.append((i, j))
            for ni, nj in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
                if 0 <= ni < rows and 0 <= nj < cols and image[ni, nj] == 1:
                    edges.append(((i, j), (ni, nj)))
    return pixels, edges

def parallel_connected_components(image, num_processes):
    """Label the 4-connected components of a binary image with a process pool.

    Bug fixed: the original passed a plain DisjointSet (plus a lock) into
    the workers; multiprocessing pickles a *copy* per worker, so every
    mutation was lost and the parent's set came back empty (the lock only
    guarded each worker's private copy). Workers now return picklable
    (pixels, edges) results and the merge happens in this process, which
    needs no lock at all.

    Args:
        image: 2-D binary numpy array; pixels equal to 1 are foreground.
        num_processes: number of worker processes / row chunks.

    Returns:
        A DisjointSet whose elements are the foreground (row, col) tuples,
        with connected pixels in the same component.
    """
    rows, _ = image.shape
    chunk_size = rows // num_processes
    with mp.Pool(processes=num_processes) as pool:
        pending = []
        for i in range(num_processes):
            start_row = i * chunk_size
            # Last chunk absorbs any remainder rows.
            end_row = start_row + chunk_size if i < num_processes - 1 else rows
            pending.append(pool.apply_async(_scan_chunk, (image, start_row, end_row)))
        chunks = [task.get() for task in pending]
    disjoint_set = DisjointSet()
    # Register every pixel before any union so find() never sees an
    # unregistered key.
    for pixels, _edges in chunks:
        for pixel in pixels:
            disjoint_set.make_set(pixel)
    for _pixels, edges in chunks:
        for a, b in edges:
            disjoint_set.union(a, b)
    return disjoint_set
# Union-find (disjoint-set) structure used to accumulate components.
class DisjointSet:
    """Union-find over hashable items with path compression in find()."""

    def __init__(self):
        # Maps each registered item to its parent; roots map to themselves.
        self.parent = {}

    def make_set(self, x):
        """Register x as a singleton component.

        Idempotent — re-registering an existing item is a no-op. (The
        original unconditionally reset parent[x] = x, which silently
        detached an already-unioned item from its component.)
        """
        if x not in self.parent:
            self.parent[x] = x

    def find(self, x):
        """Return the root of x's component, compressing the path.

        Raises KeyError if x was never registered via make_set().
        """
        if self.parent[x] != x:
            self.parent[x] = self.find(self.parent[x])
        return self.parent[x]

    def union(self, x, y):
        """Merge the components containing x and y (no-op if already merged)."""
        root_x = self.find(x)
        root_y = self.find(y)
        if root_x != root_y:
            self.parent[root_x] = root_y
# Demo: label a small binary image and print the resulting components.
if __name__ == '__main__':
    # Sample binary image (1 = foreground).
    binary = np.array([[0, 1, 0, 0, 1],
                       [1, 1, 1, 0, 0],
                       [0, 0, 1, 1, 0],
                       [0, 0, 0, 0, 1],
                       [1, 0, 1, 0, 1]])
    # Run the parallel labelling with two worker processes.
    workers = 2
    labels = parallel_connected_components(binary, workers)
    # Group the pixels by the root of their component.
    by_root = {}
    for px in labels.parent:
        by_root.setdefault(labels.find(px), []).append(px)
    print("Connected components:")
    for members in by_root.values():
        print(members)
```
使用 `multiprocessing.Manager` 中的 `Lock` 对象可以消除这条 RuntimeError。但需要注意:普通的 `DisjointSet` 实例在传给子进程时会被 pickle 成副本,子进程中对它的修改不会反映回主进程——`with lock` 保护的只是各个进程自己的副本,因此主进程最终拿到的 `disjoint_set` 仍然是空的。要得到正确的全局结果,应让每个子进程返回自己的局部结果并在主进程中合并,或者改用 `Manager` 托管的共享数据结构,这才能保证进程间的数据一致性。
希望这次修改能解决你遇到的问题。如果还有其他问题,请随时提问。
阅读全文