import urllib.request
import re


def download_html(url):
    """Download the page at *url* and return its body decoded as UTF-8."""
    # Spoof a browser User-Agent so the site does not reject the request.
    header = {
        "User-Agent": "Mozilla/5.0(Windows NT 10.0;WOW64)"
                      "AppleWebkit/537.36(KHTML,like Gecko)"
                      "Chrome/76.0.3809.87 Safari/537.36 SLBrowser/6.0.1.8131 "
    }
    req = urllib.request.Request(url=url, headers=header)
    response = urllib.request.urlopen(req)
    html = response.read().decode("utf-8")
    return html


def extract_url(html: str) -> set:
    """Return the set of Douban movie-subject URLs found in *html*.

    Bug fix for the '功能未实现' note: the original pattern matched only
    ``http://`` (Douban links via https) and used unescaped dots, so it
    found nothing. Accept both schemes and escape the literal dots.
    """
    pattern = r'https?://movie\.douban\.com/subject/[0-9]+/'
    urls = re.findall(pattern, html)
    return set(urls)


def main():
    """Read seed URLs from douban.txt; write pages and extracted movie URLs to movie.txt."""
    # 'with' guarantees both files are closed even if a download raises.
    with open('douban.txt', 'r') as file, open('movie.txt', 'w') as output:
        for url in file:
            url = url.strip()
            if not url:  # skip blank lines in the seed file
                continue
            print(url)
            html = download_html(url)
            output.write(html)
            urls = extract_url(html)
            print(urls)
            for movie_url in urls:
                print(movie_url)
                output.write(movie_url + '\n')


if __name__ == '__main__':
    main()
时间: 2023-05-29 07:04:54 浏览: 213
这段代码是Python语言的一个爬虫程序,用于从指定的URL中下载HTML代码并提取出其中的电影页面URL。其中,程序使用urllib库和re库分别实现了下载HTML和提取URL的功能,还定义了一个提供User-Agent的header,用于模拟浏览器访问网站。最后,程序从文件中读取待爬取的URL,并将提取出的电影页面URL写入到一个新的文件中。
相关问题
帮我找一下这段代码的错误,并改正import urllib.parse import urllib.request def create_request(page): base_url = 'https://movie.douban.com/j/chart/top_list?type=5&interval_id=100%3A90&action=&' data = { 'start':(page - 1) * 20, 'limit':20 } data = urllib.parse.urlencode(data) url = base_url + data # print(url) headers = { # 'User - Agent': 'Mozilla / 5.0(Windows NT 10.0;WOW64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / 108.0.5359.95Safari / 537.36' 'User - Agent': 'Mozilla / 5.0(Windows NT 10.0;WOW64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / 108.0.5359.95Safari / 537.36' } request = urllib.request.Request(url=url,headers=headers) return request def get_content(request): response = urllib.request.urlopen(request) content = response.read().decode('utf-8') return content def down_load(page,content): with open ('douban_' + str(page) + '.json','w',encoding='utf-8') as fp: fp.write(content) if __name__ == '__main__': start_page = int (input('请输入开始的页码')) end_page = int (input('请输入结束的页码')) for page in range(start_page,end_page+1): # 每一页都有请求对象的定制 request = create_request(page) content = get_content(request) down_load(page,content)
该代码存在两个错误:
1. headers中的键名有空格,需要去掉空格。
2. down_load 函数名不规范(这不是语法错误,但多余的下划线不符合常见命名约定),建议改名为 download。
改正后的代码如下:
```python
import urllib.parse
import urllib.request
def create_request(page):
    """Build a Request for one 20-item page of the Douban top-list API."""
    base_url = 'https://movie.douban.com/j/chart/top_list?type=5&interval_id=100%3A90&action=&'
    # Pages are 1-based; the API paginates with start/limit offsets.
    params = urllib.parse.urlencode({
        'start': (page - 1) * 20,
        'limit': 20,
    })
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.5359.95 Safari/537.36'
    }
    return urllib.request.Request(url=base_url + params, headers=headers)
def get_content(request):
    """Send *request* and return the response body decoded as UTF-8 text."""
    raw = urllib.request.urlopen(request).read()
    return raw.decode('utf-8')
def download(page, content):
    """Persist one page of JSON *content* to douban_<page>.json as UTF-8."""
    target = 'douban_' + str(page) + '.json'
    fp = open(target, 'w', encoding='utf-8')
    try:
        fp.write(content)
    finally:
        fp.close()
if __name__ == '__main__':
    # Ask the user for an inclusive page range, then fetch and save each page.
    start_page = int(input('请输入开始的页码'))
    end_page = int(input('请输入结束的页码'))
    for page in range(start_page, end_page + 1):
        # Build the request, fetch the JSON, and persist it in one pass.
        download(page, get_content(create_request(page)))
```
import urllib.request as urllib2from bs4 import BeautifulSoup as bsfrom urllib.error import URLError, HTTPError, ContentTooShortErrora = []def download(url): print("download..."+url) head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62"} request = urllib2.Request(url, headers=head) try: response = urllib2.urlopen(request) except(URLError, HTTPError, ContentTooShortError) as e: print("download error:"+str(e)) html = None return response.read().decode("UTF-8")def parser_html(response): soup = bs(response, 'html.parser') return soup# 打印信息def out_information(soup): # 记录爬取了多少数据 indexSpan = 1 # 打印出title标签的内容 print(soup.title) # 将a标签的信息存储在a列表里 for link in soup.find_all('a'): a.append(str(indexSpan) + link.text) indexSpan += 1 # 打印出a列表的内容 print(a)if __name__ == "__main__": # 需要访问的url url = "https://www.baidu.com/" # 下载url response = download(url) # 解析网页 soup = parser_html(response) # 打印内容 out_information(soup) 优化这段代码
这段代码已经很简洁了,但可以进一步优化:
1. 将导入的模块放在代码开头,便于查看和维护;
2. 修正 download 的异常分支:原代码捕获异常后仍会执行 return response.read(),此时 response 未定义会抛出 NameError,应在 except 中直接 return None,提高代码的健壮性;
3. 将需要访问的url作为函数参数传入,提高代码的复用性。
优化后的代码如下:
```
import urllib.request as urllib2
from bs4 import BeautifulSoup as bs
from urllib.error import URLError, HTTPError, ContentTooShortError
def download(url):
    """Fetch *url* pretending to be a desktop Edge browser.

    Returns the page body decoded as UTF-8, or None when the request fails.
    """
    print("download..." + url)
    ua = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62"}
    req = urllib2.Request(url, headers=ua)
    try:
        resp = urllib2.urlopen(req)
    except (URLError, HTTPError, ContentTooShortError) as err:
        # Report and signal failure; the caller checks for None.
        print("download error:" + str(err))
        return None
    else:
        return resp.read().decode("UTF-8")
def parser_html(response):
    """Parse raw HTML text into a BeautifulSoup tree using the stdlib parser."""
    return bs(response, 'html.parser')
# Print the scraped information.
def out_information(soup):
    """Print the page title, then a 1-indexed list of every anchor's text."""
    print(soup.title)
    # Prefix each link text with its running number, starting from 1.
    a = [str(n) + link.text for n, link in enumerate(soup.find_all('a'), start=1)]
    print(a)
if __name__ == "__main__":
    # Entry point: crawl Baidu's homepage and list its anchors.
    target = "https://www.baidu.com/"
    page = download(target)
    # Only parse and print when the download actually succeeded.
    if page is not None:
        out_information(parser_html(page))
```
阅读全文