```python
import requests
from lxml import etree

def get_jb(href, dic):
    res = requests.get(url=href, headers=headers)
    html = etree.HTML(res.text)
    a_list = html.xpath('//div[contains(@class,"fl category-txt")]/a')
    for a in a_list[1:]:  # skip the first <a> element
        dic1 = dict(dic)  # copy dic so every row gets its own dict
        third = ''.join(a.xpath('./text()'))
        t_href = 'http://y.wksc.com' + ''.join(a.xpath('./@href'))
        dic1['疾病'] = third  # '疾病' means "disease"
        try:
            get_brand(t_href, dic1)
        except Exception:
            pass  # ignore per-link failures so one bad page does not stop the crawl
```
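One bug worth calling out: the original body did `dic1 = {}` followed by `dic1 = dic`, which rebinds `dic1` to the very same dict instead of copying it, so every link would mutate (and share) the caller's `dic`. The rewrite above uses `dict(dic)`; here is the difference in a few lines (the `'分类'` key is hypothetical, alongside the source's `'疾病'` key):

```python
dic = {'分类': 'example'}

alias = dic            # rebinding: both names point at one dict
alias['疾病'] = 'flu'
print(dic)             # {'分类': 'example', '疾病': 'flu'} -- dic was mutated

dic = {'分类': 'example'}
copy = dict(dic)       # shallow copy: an independent dict
copy['疾病'] = 'flu'
print(dic)             # {'分类': 'example'} -- dic is untouched
```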
```python
# Simulated browser header (模拟浏览器头部信息) used by get_jb()'s requests.get call.
# The User-Agent string is truncated in the source; '...' marks the missing tail.
headers = {'User-Agent': 'Mozilla/5.0 (...)'}
```

This looks like Python web-scraping code as well. It defines a function named `get_jb()` that takes two parameters, `href` and `dic`: `href` is a URL and `dic` is a dictionary. The function first issues a GET request with the `requests` module and saves the response in a variable named `res`. It then parses the HTML page with the `etree` module and saves the result in a variable named `html`. Next, it uses an XPath expression to select all `<a>` elements under `<div>` elements whose class contains `fl category-txt`, saving them in a variable named `a_list`. It then iterates over `a_list` and, for each element, creates a new dictionary named `dic1` and copies the contents of `dic` into it. From each `a` element it extracts a string named `third` and a link named `t_href` and stores them in `dic1`. Finally, it calls a function named `get_brand()`, which appears to fetch some data from the link `t_href` and save it into the `dic1` dictionary.
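For completeness, here is a minimal, hypothetical sketch of how `get_jb()` might be driven. Neither the calling code nor `get_brand()` appears in the snippet, so the stub and the entry path are assumptions:

```python
def get_brand(t_href, dic1):
    # hypothetical stub: the real get_brand() presumably scrapes t_href
    # and stores the result; here we only show what it receives
    print(t_href, dic1)

if __name__ == '__main__':
    # assumed entry point: crawl one category page, carrying shared
    # fields for every row in the starting dict
    get_jb('http://y.wksc.com/jibing/', {'分类': 'example'})  # hypothetical path
```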
A second, unrelated snippet on the page is part of a turtle-based Snake game:

```python
import random
import turtle

# The setup block is truncated in the source ("...urtle()" is all that survives
# of the constructor calls), so the constants and objects below are reconstructed
# from how the functions use them; the concrete values are assumptions.
SCREEN_WIDTH = 600
SCREEN_HEIGHT = 600
CELL_SIZE = 20
SCORE_FONT_SIZE = 16
SCORE_FONT = ('Arial', SCORE_FONT_SIZE, 'normal')

snake = turtle.Turtle()
snake.penup()
snake.dx, snake.dy = 0, 0   # assumed start: stationary until a key is pressed

food = turtle.Turtle()
food.shape('circle')
food.penup()

score = turtle.Turtle()
score.hideturtle()

snake_parts = [snake]       # update_scoreboard() derives the score from this list

def draw_border():
    border = turtle.Turtle()
    border.hideturtle()
    border.speed(0)
    border.penup()
    border.goto(-SCREEN_WIDTH // 2, -SCREEN_HEIGHT // 2)
    border.pendown()
    border.pensize(2)
    border.fillcolor('white')
    border.begin_fill()
    for i in range(2):
        border.forward(SCREEN_WIDTH)
        border.left(90)
        border.forward(SCREEN_HEIGHT)
        border.left(90)
    border.end_fill()

def set_food_position():
    # pick a random cell strictly inside the border
    x = random.randint(-SCREEN_WIDTH // 2 // CELL_SIZE + 1, SCREEN_WIDTH // 2 // CELL_SIZE - 1) * CELL_SIZE
    y = random.randint(-SCREEN_HEIGHT // 2 // CELL_SIZE + 1, SCREEN_HEIGHT // 2 // CELL_SIZE - 1) * CELL_SIZE
    food.goto(x, y)

def update_scoreboard():
    score.clear()
    score.penup()
    score.goto(0, SCREEN_HEIGHT // 2 - SCORE_FONT_SIZE - 10)
    score.write('Score: {}'.format(len(snake_parts) - 1), align='center', font=SCORE_FONT)

def move_up():
    if snake.dy != -CELL_SIZE:   # can't reverse while moving down
        snake.dx = 0
        snake.dy = CELL_SIZE     # +y is up in turtle coordinates

def move_down():
    if snake.dy != CELL_SIZE:    # can't reverse while moving up
        snake.dx = 0
        snake.dy = -CELL_SIZE

def move_left():
    if snake.dx != CELL_SIZE:    # can't reverse while moving right
        snake.dx = -CELL_SIZE
        snake.dy = 0

def move_right():
    if snake.dx != -CELL_SIZE:   # can't reverse while moving left
        snake.dx = CELL_SIZE
        snake.dy = 0

def move_snake():
    x = snake.xcor()
    y = snake.ycor()
    # the source breaks off here; stepping one cell in the current
    # direction is a minimal, assumed completion
    snake.goto(x + snake.dx, y + snake.dy)
```
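The snippet stops before any screen setup, key bindings, or game loop. Here is a minimal sketch of that missing glue, assuming standard `turtle` arrow-key bindings and an `ontimer`-driven tick (the 150 ms interval, window padding, and title are my assumptions):

```python
screen = turtle.Screen()
screen.setup(SCREEN_WIDTH + 40, SCREEN_HEIGHT + 40)
screen.title('Snake')            # assumed title
screen.tracer(0)                 # redraw manually for smooth animation

screen.listen()
screen.onkey(move_up, 'Up')
screen.onkey(move_down, 'Down')
screen.onkey(move_left, 'Left')
screen.onkey(move_right, 'Right')

def tick():
    move_snake()
    screen.update()
    screen.ontimer(tick, 150)    # assumed tick interval

draw_border()
set_food_position()
update_scoreboard()
tick()
screen.mainloop()
```

`tracer(0)` plus an explicit `update()` per tick avoids the flicker of turtle's default incremental redraws.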