```python
def printData(data):
    result_file = open("result.txt", 'a', encoding='utf-8')
    result_file.write(str(data) + '\n')
    result_file.close()
```
This code defines a simple output function that writes string data to a file.
The function takes a single parameter, data, the value to write. It converts the value to a string, opens the file "result.txt", appends the data followed by a newline, and then closes the file.
Note that the file is opened in "a" (append) mode: the file is created if it does not exist, and repeated calls accumulate their output in the same file instead of overwriting it each time. The encoding is set to UTF-8 so that non-ASCII characters, such as Chinese text, are saved correctly.
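For comparison, here is a sketch of the same function written with a with statement, the more idiomatic way to guarantee the file is closed even if the write fails:

```python
def printData(data):
    # The context manager closes the file automatically, even on error.
    with open("result.txt", "a", encoding="utf-8") as result_file:
        result_file.write(str(data) + "\n")

printData("hello")   # appends "hello" to result.txt
printData({"k": 1})  # non-string data is converted with str()
```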
Related questions
```python
import requests
from html.parser import HTMLParser
import argparse
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed
import multiprocessing

prefix = "save/"
readed_path = multiprocessing.Manager().Queue()
cur_path = multiprocessing.Manager().Queue()
new_path = multiprocessing.Manager().Queue()
lock = multiprocessing.Lock()

class MyHttpParser(HTMLParser):
    def __init__(self):
        super().__init__()
        self.tag = []
        self.href = ""
        self.txt = ""

    def handle_starttag(self, tag, attrs):
        self.tag.append(tag)
        if tag == "a":
            for att in attrs:
                if att[0] == 'href':
                    self.href = att[1]

    def handle_endtag(self, tag):
        if tag == "a" and len(self.tag) > 2 and self.tag[-2] == "div":
            print("in div, link txt is %s ." % self.txt)
            print("in div, link url is %s ." % self.href)
            if not self.href in readed_path.queue:
                readed_path.put(self.href)
                new_path.put(self.href)
        self.tag.pop(-1)

    def handle_data(self, data):
        if len(self.tag) >= 1 and self.tag[-1] == "a":
            self.txt = data

def LoadHtml(path, file_path):
    if len(file_path) == 0:
        file_path = "/"
    url = f"http://{path}{file_path}"
    try:
        response = requests.get(url)
        print(response.status_code, response.reason, response.raw.version)
        data = response.content.decode("utf-8")
        if response.status_code == 301:
            data = response.headers["Location"]
            if not data in readed_path.queue:
                new_path.put(data)
            data = ""
        return data
    except Exception as e:
        print(e.args)

def ParseArgs():
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--path", help="domain name")
    parser.add_argument("-d", "--deep", type=int, help="recursion depth")
    args = parser.parse_args()
    return args

def formatPath(path):
    path = path.removeprefix("https://")
    path = path.removeprefix("http://")
    path = path.removeprefix("//")
    return path

def doWork(path):
    path = formatPath(path)
    m = path.find("/")
    if m == -1:
        m = len(path)
    data = LoadHtml(path[:m], path[m:])
    with open(prefix + path[:m] + ".html", "w+", encoding="utf-8") as f:
        f.write(data)
    parse.feed(data)

def work(maxdeep):
    args = ParseArgs()
    cur_path.put(formatPath(args.path))
    readed_path.put(formatPath(args.path))
    parse = MyHttpParser()
    with ProcessPoolExecutor(max_workers=4) as executor:
        for i in range(args.deep):
            size = cur_path.qsize()
            futures = [executor.submit(doWork, cur_path.get()) for _ in range(size)]
            for future in as_completed(futures):
                try:
                    future.result()
                except Exception as e:
                    print(e)
            cur_path.queue.clear()
            while not new_path.empty():
                cur_path.put(new_path.get())
            print(i)

if __name__ == '__main__':
    work(5)
```
This code reports Unresolved reference 'parse'.
This happens because the doWork function uses the parse variable, but parse is neither defined in doWork nor passed to it (it is local to work). You need to either pass the parser to doWork as a parameter, or define it as a global variable so doWork can see it; if you go the global route, make sure it is defined before it is used.
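A third option, shown below as a minimal runnable sketch (the parser class is trimmed to a stub), is simply to construct the parser inside doWork() so every call, and therefore every worker process, gets its own instance:

```python
from html.parser import HTMLParser

class MyHttpParser(HTMLParser):
    def handle_starttag(self, tag, attrs):
        print("saw tag:", tag)

def doWork(data):
    parser = MyHttpParser()  # local instance: nothing unresolved, nothing shared
    parser.feed(data)

doWork("<div><a href='/x'>link</a></div>")  # prints: saw tag: div / saw tag: a
```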
```python
import http.client
from html.parser import HTMLParser
import argparse
from concurrent.futures import ThreadPoolExecutor
import multiprocessing.pool

prefix = "save/"
readed_path = multiprocessing.Manager().list()
cur_path = multiprocessing.Manager().list()
new_path = multiprocessing.Manager().list()
lock = multiprocessing.Lock()

class MyHttpParser(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        self.tag = []
        self.href = ""
        self.txt = ""

    def handle_starttag(self, tag, attrs):
        self.tag.append(tag)
        # print("start tag in list :" + str(self.tag))
        if tag == "a":
            for att in attrs:
                if att[0] == 'href':
                    self.href = att[1]

    def handle_endtag(self, tag):
        if tag == "a" and len(self.tag) > 2 and self.tag[-2] == "div":
            print("in div, link txt is %s ." % self.txt)
            print("in div, link url is %s ." % self.href)
            lock.acquire()
            if not self.href in readed_path:
                readed_path.append(self.href)
                new_path.append(self.href)
            # print("end tag in list :" + str(self.tag))
            lock.release()
        self.tag.pop(-1)

    def handle_data(self, data):
        if len(self.tag) >= 1 and self.tag[-1] == "a":
            self.txt = data

def LoadHtml(path, file_path):
    if len(file_path) == 0:
        file_path = "/"
    conn = http.client.HTTPConnection(path)
    try:
        conn.request("GET", file_path)
        response = conn.getresponse()
        print(response.status, response.reason, response.version)
        data = response.read().decode("utf-8")
        if response.status == 301:
            data = response.getheader("Location")
            lock.acquire()
            new_path.append(data)
            lock.release()
            data = ""
        # print(data)
        conn.close()
        return data
    except Exception as e:
        print(e.args)

def ParseArgs():
    # initialize the parser
    parser = argparse.ArgumentParser()
    # define the arguments
    parser.add_argument("-p", "--path", help="domain name")
    parser.add_argument("-d", "--deep", type=int, help="recursion depth")
    # parse
    args = parser.parse_args()
    return args

def formatPath(path):
    path = path.removeprefix("https://")
    path = path.removeprefix("http://")
    path = path.removeprefix("//")
    return path

def doWork(path):
    path = formatPath(path)
    m = path.find("/")
    if m == -1:
        m = len(path)
    data = LoadHtml(path[:m], path[m:])
    with open(prefix + path[:m] + ".html", "w+", encoding="utf-8") as f:
        f.write(data)
    parse.feed(data)

def work(deep, maxdeep):
    if deep > maxdeep:
        return
    args = ParseArgs()
    cur_path.append(formatPath(args.path))
    readed_path.append(formatPath(args.path))
    parse = MyHttpParser()
    e = multiprocessing.Pool(4)
    for i in range(args.deep):
        size = len(cur_path)
        e.map(doWork, cur_path)
        cur_path[:] = []
        for p in new_path:
            cur_path.append(p)
        new_path[:] = []
        print(i)
```
Optimize this code so that it can run on Windows.
On Windows, `multiprocessing` cannot fork and instead uses the spawn start method: every child process re-imports the main module. Two things follow from this. First, the code must run as a standalone script whose entry point is guarded by `if __name__ == '__main__':` (it will not work from an interactive window). Second, shared state such as `multiprocessing.Manager()` queues and locks must be created in the parent process under that guard and handed to the workers, not created at module import time, otherwise each child builds its own, unrelated copies.
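As a minimal, self-contained illustration of the guard (using a hypothetical `square()` worker), every multiprocessing script on Windows needs this shape:

```python
from concurrent.futures import ProcessPoolExecutor

def square(x):  # workers must live at module top level so spawn can import them
    return x * x

if __name__ == "__main__":
    # Without this guard, each spawned child would re-run the module body
    # and try to create its own pool.
    with ProcessPoolExecutor(max_workers=2) as executor:
        print(list(executor.map(square, range(4))))  # -> [0, 1, 4, 9]
```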
The code was optimized as follows:
1. Use `multiprocessing.Manager().Queue()` instead of `multiprocessing.Manager().list()` for `cur_path` and `new_path`, since a queue is better suited to handing work between processes; `readed_path` stays a Manager list so the `in` membership check used for de-duplication keeps working.
2. Replace `multiprocessing.Pool()` with `concurrent.futures.ProcessPoolExecutor()`, which surfaces exceptions raised inside worker tasks more cleanly.
3. Rework `work()` to submit its tasks through the `ProcessPoolExecutor()`.
4. Keep the parser state (`tag`, `href`, `txt`) as instance attributes of `MyHttpParser`, since each process runs its own parser instance.
5. Send HTTP requests with the `requests` library instead of `http.client`, as it is easier to use.
6. Create the Manager and its shared objects inside `work()`, under the `__main__` guard, and inject them into the worker processes through the executor's initializer, so the code works with Windows' spawn start method; `doWork()` also builds its own `MyHttpParser` instead of relying on a `parse` global.
Here is the optimized code:
```python
import os
import argparse
import multiprocessing
from concurrent.futures import ProcessPoolExecutor, as_completed
from html.parser import HTMLParser

import requests

prefix = "save/"

# On Windows the spawn start method re-imports this module in every child,
# so the shared objects are created in work() and injected into each worker
# through the executor's initializer instead of at import time.
readed_path = None  # Manager list of URLs already seen (supports `in` checks)
cur_path = None     # Manager queue of URLs at the current depth
new_path = None     # Manager queue of URLs discovered for the next depth
lock = None         # Manager lock guarding the check-then-append on readed_path


def init_worker(seen, cur, new, lk):
    global readed_path, cur_path, new_path, lock
    readed_path, cur_path, new_path, lock = seen, cur, new, lk


class MyHttpParser(HTMLParser):
    def __init__(self):
        super().__init__()
        self.tag = []
        self.href = ""
        self.txt = ""

    def handle_starttag(self, tag, attrs):
        self.tag.append(tag)
        if tag == "a":
            for att in attrs:
                if att[0] == 'href':
                    self.href = att[1]

    def handle_endtag(self, tag):
        if tag == "a" and len(self.tag) > 2 and self.tag[-2] == "div":
            print("in div, link txt is %s ." % self.txt)
            print("in div, link url is %s ." % self.href)
            with lock:  # make the de-duplication check-then-append atomic
                if self.href not in readed_path:
                    readed_path.append(self.href)
                    new_path.put(self.href)
        self.tag.pop(-1)

    def handle_data(self, data):
        if len(self.tag) >= 1 and self.tag[-1] == "a":
            self.txt = data


def LoadHtml(path, file_path):
    if len(file_path) == 0:
        file_path = "/"
    url = f"http://{path}{file_path}"
    try:
        # Redirects are disabled so that a 301 can be queued explicitly.
        response = requests.get(url, allow_redirects=False)
        print(response.status_code, response.reason)
        if response.status_code == 301:
            location = response.headers["Location"]
            with lock:
                if location not in readed_path:
                    readed_path.append(location)
                    new_path.put(location)
            return ""
        return response.content.decode("utf-8")
    except Exception as e:
        print(e.args)
        return ""


def ParseArgs():
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--path", help="domain name")
    parser.add_argument("-d", "--deep", type=int, help="recursion depth")
    return parser.parse_args()


def formatPath(path):
    path = path.removeprefix("https://")
    path = path.removeprefix("http://")
    path = path.removeprefix("//")
    return path


def doWork(path):
    path = formatPath(path)
    m = path.find("/")
    if m == -1:
        m = len(path)
    data = LoadHtml(path[:m], path[m:])
    with open(prefix + path[:m] + ".html", "w+", encoding="utf-8") as f:
        f.write(data)
    MyHttpParser().feed(data)  # each task parses with its own instance


def work(maxdeep):
    args = ParseArgs()
    deep = args.deep if args.deep is not None else maxdeep
    os.makedirs(prefix, exist_ok=True)

    manager = multiprocessing.Manager()
    seen = manager.list([formatPath(args.path)])
    cur = manager.Queue()
    new = manager.Queue()
    lk = manager.Lock()
    cur.put(formatPath(args.path))

    with ProcessPoolExecutor(max_workers=4, initializer=init_worker,
                             initargs=(seen, cur, new, lk)) as executor:
        for i in range(deep):
            # Drain the current depth and submit one task per URL.
            futures = [executor.submit(doWork, cur.get())
                       for _ in range(cur.qsize())]
            for future in as_completed(futures):
                try:
                    future.result()
                except Exception as e:
                    print(e)
            # Promote the URLs discovered at this depth to the next round.
            while not new.empty():
                cur.put(new.get())
            print(i)


if __name__ == '__main__':
    work(5)
```
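Assuming the script is saved as crawler.py (the filename is illustrative), it can then be run from a regular command prompt rather than an interactive window, for example:

```
python crawler.py -p example.com -d 2
```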