p=multiprocessing.Pool
This is a question about multiprocessing in Python. `multiprocessing.Pool` is a class in Python's `multiprocessing` module that creates a pool of worker processes. The Pool object's `map()` method applies a function to every element of an iterable, distributing the work across the pool's processes so the calls run in parallel.
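For example, a minimal sketch of `Pool.map()` (the `square` function here is just an illustration):

```python
import multiprocessing

def square(x):
    return x * x

if __name__ == '__main__':
    # map() splits the iterable across the worker processes and
    # collects the results in order.
    with multiprocessing.Pool(processes=4) as pool:
        results = pool.map(square, range(10))
    print(results)  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
```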
Related questions
```python
import pickle
import multiprocessing
from concurrent.futures import ThreadPoolExecutor

def func(data):
    print(data)

def worker(array):
    array = pickle.loads(array)
    pool = ThreadPoolExecutor(1)
    for a in array:
        pool.submit(func, a)
    pool.shutdown(wait=True)

if __name__ == '__main__':
    processes = []
    data = [i for i in range(300)]
    data = pickle.dumps(data)
    for i in range(3):
        p = multiprocessing.Process(target=worker, args=(data[100*i:100*(i+1)],))
        processes.append(p)
        p.start()
    for p in processes:
        p.join()
```

This fails with `_pickle.UnpicklingError: pickle data was truncated`.
The error comes from how the pickled bytes are split, not from the size of the data. `pickle.dumps(data)` serializes the whole 300-element list into a single byte string, and `data[100*i:100*(i+1)]` then slices that byte string at arbitrary byte offsets. Each process therefore receives a fragment of one pickle stream rather than a complete pickle, and `pickle.loads()` fails with "pickle data was truncated". The fix is to slice the list first and pickle each chunk separately, or to skip manual pickling entirely and pass the list slice as the process argument, since `multiprocessing` pickles arguments for you.
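A minimal corrected sketch of the code from the question, slicing the list before pickling:

```python
import pickle
import multiprocessing
from concurrent.futures import ThreadPoolExecutor

def func(item):
    print(item)

def worker(blob):
    chunk = pickle.loads(blob)  # blob is now a complete pickle stream
    with ThreadPoolExecutor(1) as pool:
        for item in chunk:
            pool.submit(func, item)

if __name__ == '__main__':
    data = [i for i in range(300)]
    processes = []
    for i in range(3):
        chunk = data[100*i:100*(i+1)]   # slice the list, not the pickled bytes
        blob = pickle.dumps(chunk)      # each chunk becomes its own pickle
        p = multiprocessing.Process(target=worker, args=(blob,))
        processes.append(p)
        p.start()
    for p in processes:
        p.join()
```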
```python
import http.client
from html.parser import HTMLParser
import argparse
from concurrent.futures import ThreadPoolExecutor
import multiprocessing.pool

prefix = "save/"
readed_path = multiprocessing.Manager().list()
cur_path = multiprocessing.Manager().list()
new_path = multiprocessing.Manager().list()
lock = multiprocessing.Lock()

class MyHttpParser(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        self.tag = []
        self.href = ""
        self.txt = ""

    def handle_starttag(self, tag, attrs):
        self.tag.append(tag)
        # print("start tag in list :" + str(self.tag))
        if tag == "a":
            for att in attrs:
                if att[0] == 'href':
                    self.href = att[1]

    def handle_endtag(self, tag):
        if tag == "a" and len(self.tag) > 2 and self.tag[-2] == "div":
            print("in div, link txt is %s ." % self.txt)
            print("in div, link url is %s ." % self.href)
            lock.acquire()
            if not self.href in readed_path:
                readed_path.append(self.href)
                new_path.append(self.href)
            # print("end tag in list :" + str(self.tag))
            lock.release()
        self.tag.pop(-1)

    def handle_data(self, data):
        if len(self.tag) >= 1 and self.tag[-1] == "a":
            self.txt = data

def LoadHtml(path, file_path):
    if len(file_path) == 0:
        file_path = "/"
    conn = http.client.HTTPConnection(path)
    try:
        conn.request("GET", file_path)
        response = conn.getresponse()
        print(response.status, response.reason, response.version)
        data = response.read().decode("utf-8")
        if response.status == 301:
            data = response.getheader("Location")
            lock.acquire()
            new_path.append(data)
            lock.release()
            data = ""
        # print(data)
        conn.close()
        return data
    except Exception as e:
        print(e.args)

def ParseArgs():
    # initialize the parser
    parser = argparse.ArgumentParser()
    # define the arguments
    parser.add_argument("-p", "--path", help="domain name")
    parser.add_argument("-d", "--deep", type=int, help="recursion depth")
    # parse
    args = parser.parse_args()
    return args

def formatPath(path):
    path = path.removeprefix("https://")
    path = path.removeprefix("http://")
    path = path.removeprefix("//")
    return path

def doWork(path):
    path = formatPath(path)
    m = path.find("/")
    if m == -1:
        m = len(path)
    data = LoadHtml(path[:m], path[m:])
    with open(prefix + path[:m] + ".html", "w+", encoding="utf-8") as f:
        f.write(data)
    parse.feed(data)

def work(deep, maxdeep):
    if deep > maxdeep:
        return

args = ParseArgs()
cur_path.append(formatPath(args.path))
readed_path.append(formatPath(args.path))
parse = MyHttpParser()
e = multiprocessing.Pool(4)
for i in range(args.deep):
    size = len(cur_path)
    e.map(doWork, cur_path)
    cur_path[:] = []
    for p in new_path:
        cur_path.append(p)
    new_path[:] = []
    print(i)
```

Optimize this code so it can run on Windows.
On Windows, `multiprocessing` uses the spawn start method: every child process re-imports the main module, so process creation must sit behind an `if __name__ == '__main__':` guard, and the code cannot be run from an interactive session; it needs to be a standalone script. This also means the module-level `multiprocessing.Manager().list()` calls in this script are re-executed in every child, giving each process its own manager instead of shared state. Shared objects should be created inside the guard and handed to the workers explicitly, for example through a pool initializer.
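A minimal sketch of the Windows-safe pattern (the `task` function and `shared` list here are illustrative; the full rewrite below uses the same shape):

```python
import multiprocessing
from concurrent.futures import ProcessPoolExecutor

shared = None  # set per worker process by the initializer

def init_worker(proxy):
    # Runs once in every spawned worker; stores the Manager proxy.
    global shared
    shared = proxy

def task(x):
    shared.append(x * x)

if __name__ == '__main__':  # required on Windows: children re-import this module
    manager = multiprocessing.Manager()
    results = manager.list()
    with ProcessPoolExecutor(max_workers=4, initializer=init_worker,
                             initargs=(results,)) as pool:
        list(pool.map(task, range(10)))  # drain the iterator to surface errors
    print(sorted(results))
```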
Here are the main changes:
1. Create the shared containers (`multiprocessing.Manager().list()`) inside the `if __name__ == '__main__':` block and pass them to the worker processes through a pool initializer, instead of creating them at module level; list proxies also support the `in` membership test that the parser needs.
2. Replace `multiprocessing.Pool()` with `concurrent.futures.ProcessPoolExecutor()`, which reports worker exceptions through `future.result()`.
3. Drive the crawl loop in `work()` with `executor.submit()` and `as_completed()` so per-task errors can be caught and printed.
4. Build a fresh `MyHttpParser` inside each `doWork()` call; the module-level `parse` instance from the original is not defined in spawned worker processes.
5. Use the `requests` library instead of `http.client` in `LoadHtml()`, because it is easier to use; `allow_redirects=False` preserves the original handling of 301 responses.
Here is the optimized code:
```python
import os
import argparse
import multiprocessing
from concurrent.futures import ProcessPoolExecutor, as_completed
from html.parser import HTMLParser

import requests

prefix = "save/"

# Filled in per worker by the pool initializer. On Windows the spawn start
# method re-imports this module in every child, so module-level Manager
# objects would be recreated instead of shared.
readed_path = None
new_path = None

def init_worker(shared_readed, shared_new):
    # Runs once in each worker process and stores the Manager proxies.
    global readed_path, new_path
    readed_path = shared_readed
    new_path = shared_new

class MyHttpParser(HTMLParser):
    def __init__(self):
        super().__init__()
        self.tag = []
        self.href = ""
        self.txt = ""

    def handle_starttag(self, tag, attrs):
        self.tag.append(tag)
        if tag == "a":
            for att in attrs:
                if att[0] == 'href':
                    self.href = att[1]

    def handle_endtag(self, tag):
        if tag == "a" and len(self.tag) > 2 and self.tag[-2] == "div":
            print("in div, link txt is %s ." % self.txt)
            print("in div, link url is %s ." % self.href)
            # Manager list proxies serialize each operation, so no
            # explicit Lock is needed for this check-then-append.
            if self.href not in readed_path:
                readed_path.append(self.href)
                new_path.append(self.href)
        self.tag.pop(-1)

    def handle_data(self, data):
        if len(self.tag) >= 1 and self.tag[-1] == "a":
            self.txt = data

def LoadHtml(path, file_path):
    if len(file_path) == 0:
        file_path = "/"
    url = f"http://{path}{file_path}"
    try:
        # allow_redirects=False preserves the original 301 handling.
        response = requests.get(url, allow_redirects=False)
        print(response.status_code, response.reason)
        data = response.content.decode("utf-8")
        if response.status_code == 301:
            location = response.headers["Location"]
            if location not in readed_path:
                new_path.append(location)
            data = ""
        return data
    except Exception as e:
        print(e.args)
        return ""

def ParseArgs():
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--path", help="domain name")
    parser.add_argument("-d", "--deep", type=int, help="recursion depth")
    return parser.parse_args()

def formatPath(path):
    path = path.removeprefix("https://")
    path = path.removeprefix("http://")
    path = path.removeprefix("//")
    return path

def doWork(path):
    path = formatPath(path)
    m = path.find("/")
    if m == -1:
        m = len(path)
    data = LoadHtml(path[:m], path[m:])
    if not data:
        return
    os.makedirs(prefix, exist_ok=True)
    with open(prefix + path[:m] + ".html", "w+", encoding="utf-8") as f:
        f.write(data)
    # Each task builds its own parser: a module-level instance would not
    # exist in spawned worker processes.
    parser = MyHttpParser()
    parser.feed(data)

def work():
    args = ParseArgs()
    manager = multiprocessing.Manager()
    shared_readed = manager.list([formatPath(args.path)])
    shared_new = manager.list()
    cur_path = [formatPath(args.path)]
    with ProcessPoolExecutor(max_workers=4, initializer=init_worker,
                             initargs=(shared_readed, shared_new)) as executor:
        for i in range(args.deep):
            futures = [executor.submit(doWork, p) for p in cur_path]
            for future in as_completed(futures):
                try:
                    future.result()
                except Exception as e:
                    print(e)
            cur_path = list(shared_new)
            shared_new[:] = []
            print(i)

if __name__ == '__main__':
    work()
```
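To try it, save the script to a file (the name `crawler.py` below is just an example) and run it from a regular command prompt rather than an interactive session, e.g. `python crawler.py -p example.com -d 2`; the `save/` output directory is created automatically.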